
/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file cdp_txrx_cmn.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_H_
#define _CDP_TXRX_CMN_H_

#include "qdf_types.h"
#include "qdf_nbuf.h"
#include "cdp_txrx_ops.h"
#include "cdp_txrx_handle.h"
#include "cdp_txrx_cmn_struct.h"

/******************************************************************************
 *
 * Common Data Path Header File
 *
 *****************************************************************************/
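
/**
 * cdp_soc_attach_target() - attach the data path SOC to its target
 * @soc: opaque soc handle
 *
 * Wrapper that validates @soc and dispatches to the txrx_soc_attach_target
 * op registered in cmn_drv_ops.
 *
 * Return: value from the registered op, or 0 if the op is unavailable
 */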
static inline int
cdp_soc_attach_target(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_soc_attach_target)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
}

static inline int
cdp_soc_get_nss_cfg(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg(soc);
}

static inline void
cdp_soc_set_nss_cfg(ol_txrx_soc_handle soc, uint32_t config)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg)
                return;
        soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg(soc, config);
}
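
/**
 * cdp_vdev_attach() - allocate and attach a vdev to the data path
 * @soc: opaque soc handle
 * @pdev: physical device the vdev belongs to
 * @vdev_mac_addr: MAC address of the vdev
 * @vdev_id: vdev id
 * @op_mode: operating mode of the vdev
 *
 * Return: handle to the new vdev, or NULL on failure
 */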
static inline struct cdp_vdev *
cdp_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
        uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_vdev_attach)
                return NULL;
        return soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev,
                        vdev_mac_addr, vdev_id, op_mode);
}

static inline void
cdp_vdev_detach(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        ol_txrx_vdev_delete_cb callback, void *cb_context)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_vdev_detach)
                return;
        soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev,
                        callback, cb_context);
}

static inline int
cdp_pdev_attach_target(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_pdev_attach_target)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(pdev);
}
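
/**
 * cdp_pdev_attach() - allocate and attach a physical device (pdev)
 * @soc: opaque soc handle
 * @ctrl_pdev: control path pdev configuration handle
 * @htc_pdev: HTC handle used for host/target messaging
 * @osdev: qdf device handle
 * @pdev_id: id of the pdev being attached
 *
 * Return: handle to the new pdev, or NULL on failure
 */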
static inline struct cdp_pdev *cdp_pdev_attach
        (ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
        HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_pdev_attach)
                return NULL;
        return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev,
                        htc_pdev, osdev, pdev_id);
}

static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_pdev_post_attach)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev);
}

static inline void
cdp_pdev_pre_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_pdev_pre_detach)
                return;
        soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(pdev, force);
}

static inline void
cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_pdev_detach)
                return;
        soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force);
}
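
/**
 * cdp_peer_create() - create a data path peer object on a vdev
 * @soc: opaque soc handle
 * @vdev: vdev the peer is associated with
 * @peer_mac_addr: MAC address of the peer
 *
 * Return: opaque peer handle, or NULL on failure
 */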
static inline void *cdp_peer_create
        (ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        uint8_t *peer_mac_addr)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_create)
                return NULL;
        return soc->ops->cmn_drv_ops->txrx_peer_create(vdev,
                        peer_mac_addr);
}

static inline void cdp_peer_setup
        (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_setup)
                return;
        soc->ops->cmn_drv_ops->txrx_peer_setup(vdev,
                        peer);
}
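
/**
 * cdp_peer_add_ast() - add an AST (address search table) entry for a peer
 * @soc: opaque soc handle
 * @peer_handle: peer owning the AST entry
 * @mac_addr: MAC address to be resolved by the entry
 * @type: AST entry type
 * @flags: entry flags
 *
 * Return: value from the registered op, or 0 if the op is unavailable
 */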
static inline int cdp_peer_add_ast
        (ol_txrx_soc_handle soc, struct cdp_peer *peer_handle,
        uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, uint32_t flags)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_add_ast)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_peer_add_ast(soc,
                        peer_handle,
                        mac_addr,
                        type,
                        flags);
}

static inline int cdp_peer_update_ast
        (ol_txrx_soc_handle soc, void *ast_handle,
        struct cdp_peer *peer_handle, uint32_t flags)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_update_ast)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_peer_update_ast(soc,
                        peer_handle,
                        ast_handle,
                        flags);
}

static inline void cdp_peer_del_ast
        (ol_txrx_soc_handle soc, void *ast_handle)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_del_ast)
                return;
        soc->ops->cmn_drv_ops->txrx_peer_del_ast(soc, ast_handle);
}

static inline void *cdp_peer_ast_hash_find
        (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find)
                return NULL;
        return soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find(soc,
                        ast_mac_addr);
}

static inline uint8_t cdp_peer_ast_get_pdev_id
        (ol_txrx_soc_handle soc, void *ast_handle)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0xff;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id)
                return 0xff;
        return soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id(soc,
                        ast_handle);
}

static inline uint8_t cdp_peer_ast_get_next_hop
        (ol_txrx_soc_handle soc, void *ast_handle)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0xff;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop)
                return 0xff;
        return soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop(soc,
                        ast_handle);
}

static inline void cdp_peer_ast_set_type
        (ol_txrx_soc_handle soc, void *ast_handle,
        enum cdp_txrx_ast_entry_type type)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_ast_set_type)
                return;
        soc->ops->cmn_drv_ops->txrx_peer_ast_set_type(soc, ast_handle, type);
}

static inline void cdp_peer_teardown
        (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_teardown)
                return;
        soc->ops->cmn_drv_ops->txrx_peer_teardown(vdev, peer);
}

static inline void
cdp_peer_delete(ol_txrx_soc_handle soc, void *peer, uint32_t bitmap)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_delete)
                return;
        soc->ops->cmn_drv_ops->txrx_peer_delete(peer, bitmap);
}
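
/**
 * cdp_set_monitor_mode() - configure a vdev for monitor-mode reception
 * @soc: opaque soc handle
 * @vdev: monitor vdev handle
 * @smart_monitor: smart monitor flag
 *
 * Return: value from the registered op, or 0 if the op is unavailable
 */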
static inline int
cdp_set_monitor_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        uint8_t smart_monitor)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_set_monitor_mode)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(vdev,
                        smart_monitor);
}

static inline void
cdp_set_curchan(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev,
        uint32_t chan_mhz)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_set_curchan)
                return;
        soc->ops->cmn_drv_ops->txrx_set_curchan(pdev, chan_mhz);
}

static inline void
cdp_set_privacy_filters(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        void *filter, uint32_t num)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_set_privacy_filters)
                return;
        soc->ops->cmn_drv_ops->txrx_set_privacy_filters(vdev,
                        filter, num);
}

static inline int
cdp_set_monitor_filter(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
        struct cdp_monitor_filter *filter_val)
{
        if (soc->ops->mon_ops->txrx_set_advance_monitor_filter)
                return soc->ops->mon_ops->txrx_set_advance_monitor_filter(pdev,
                                filter_val);
        return 0;
}

/******************************************************************************
 * Data Interface (B Interface)
 *****************************************************************************/
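
/**
 * cdp_vdev_register() - register the OS shim with a data path vdev
 * @soc: opaque soc handle
 * @vdev: data path vdev handle
 * @osif_vdev: OS shim vdev context passed back in callbacks
 * @txrx_ops: OS shim tx/rx callbacks to be invoked by the data path
 *
 * Return: void
 */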
static inline void
cdp_vdev_register(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        void *osif_vdev, struct ol_txrx_ops *txrx_ops)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_vdev_register)
                return;
        soc->ops->cmn_drv_ops->txrx_vdev_register(vdev,
                        osif_vdev, txrx_ops);
}

static inline int
cdp_mgmt_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        qdf_nbuf_t tx_mgmt_frm, uint8_t type)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_mgmt_send)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_mgmt_send(vdev,
                        tx_mgmt_frm, type);
}

static inline int
cdp_mgmt_send_ext(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        qdf_nbuf_t tx_mgmt_frm, uint8_t type,
        uint8_t use_6mbps, uint16_t chanfreq)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_mgmt_send_ext)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_mgmt_send_ext
                        (vdev, tx_mgmt_frm, type, use_6mbps, chanfreq);
}

static inline void
cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
        uint8_t type, ol_txrx_mgmt_tx_cb download_cb,
        ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set)
                return;
        soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set
                        (pdev, type, download_cb, ota_ack_cb, ctxt);
}

static inline int cdp_get_tx_pending(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_get_tx_pending)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev);
}

static inline void
cdp_data_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_vdev *data_vdev,
        ol_txrx_data_tx_cb callback, void *ctxt)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_data_tx_cb_set)
                return;
        soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(data_vdev,
                        callback, ctxt);
}

/******************************************************************************
 * Statistics and Debugging Interface (C Interface)
 *****************************************************************************/

/**
 * External Device physical address types
 *
 * Currently, both MAC and IPA uController use the same size addresses
 * and descriptors are exchanged between these two depending on the mode.
 *
 * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA
 * operations. However, external device physical address sizes
 * may be different from host-specific physical address sizes.
 * This calls for the following definitions for target devices
 * (MAC, IPA uc).
 */
#if HTT_PADDR64
typedef uint64_t target_paddr_t;
#else
typedef uint32_t target_paddr_t;
#endif /* HTT_PADDR64 */

static inline int
cdp_aggr_cfg(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        int max_subfrms_ampdu,
        int max_subfrms_amsdu)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_aggr_cfg)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_aggr_cfg(vdev,
                        max_subfrms_ampdu, max_subfrms_amsdu);
}

static inline int
cdp_fw_stats_get(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        struct ol_txrx_stats_req *req, bool per_vdev,
        bool response_expected)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_fw_stats_get)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_fw_stats_get(vdev, req,
                        per_vdev, response_expected);
}

static inline int
cdp_debug(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, int debug_specs)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_debug)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_debug(vdev, debug_specs);
}

static inline void cdp_fw_stats_cfg(ol_txrx_soc_handle soc,
        struct cdp_vdev *vdev, uint8_t cfg_stats_type, uint32_t cfg_val)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_fw_stats_cfg)
                return;
        soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(vdev,
                        cfg_stats_type, cfg_val);
}

static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_print_level_set)
                return;
        soc->ops->cmn_drv_ops->txrx_print_level_set(level);
}

static inline uint8_t *
cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr)
                return NULL;
        return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(vdev);
}

/**
 * cdp_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
 * vdev
 * @vdev: vdev handle
 *
 * Return: Handle to struct qdf_mac_addr
 */
static inline struct qdf_mac_addr *cdp_get_vdev_struct_mac_addr
        (ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr)
                return NULL;
        return soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr
                        (vdev);
}

/**
 * cdp_get_pdev_from_vdev() - Return handle to pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to pdev
 */
static inline struct cdp_pdev *cdp_get_pdev_from_vdev
        (ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev)
                return NULL;
        return soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev(vdev);
}

/**
 * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to control pdev
 */
static inline struct cdp_cfg *
cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev)
                return NULL;
        return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev
                        (vdev);
}

static inline struct cdp_vdev *
cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
        uint8_t vdev_id)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id)
                return NULL;
        return soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id
                        (pdev, vdev_id);
}
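
/**
 * cdp_soc_detach() - detach and free the data path SOC
 * @soc: opaque soc handle
 *
 * Return: void
 */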
static inline void
cdp_soc_detach(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_soc_detach)
                return;
        soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc);
}

static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc,
        void *peer_handle, uint8_t dialogtoken, uint16_t tid,
        uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->addba_requestprocess)
                return 0;
        return soc->ops->cmn_drv_ops->addba_requestprocess(peer_handle,
                        dialogtoken, tid, batimeout, buffersize, startseqnum);
}

static inline void cdp_addba_responsesetup(ol_txrx_soc_handle soc,
        void *peer_handle, uint8_t tid, uint8_t *dialogtoken,
        uint16_t *statuscode, uint16_t *buffersize, uint16_t *batimeout)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->addba_responsesetup)
                return;
        soc->ops->cmn_drv_ops->addba_responsesetup(peer_handle, tid,
                        dialogtoken, statuscode, buffersize, batimeout);
}

static inline int cdp_delba_process(ol_txrx_soc_handle soc,
        void *peer_handle, int tid, uint16_t reasoncode)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->delba_process)
                return 0;
        return soc->ops->cmn_drv_ops->delba_process(peer_handle,
                        tid, reasoncode);
}

static inline void cdp_set_addbaresponse(ol_txrx_soc_handle soc,
        void *peer_handle, int tid, uint16_t statuscode)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_addba_response)
                return;
        soc->ops->cmn_drv_ops->set_addba_response(peer_handle, tid, statuscode);
}

/**
 * cdp_get_peer_mac_addr_frm_id() - return vdev id and peer mac address
 * @soc: SOC handle
 * @peer_id: peer id of the peer for which mac_address is required
 * @mac_addr: reference to mac address
 *
 * Return: vdev_id of the vap
 */
static inline uint8_t
cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id,
        uint8_t *mac_addr)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return CDP_INVALID_VDEV_ID;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id)
                return CDP_INVALID_VDEV_ID;
        return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc,
                        peer_id, mac_addr);
}

/**
 * cdp_set_vdev_dscp_tid_map(): function to set DSCP-tid map in the vap
 * @vdev: vdev handle
 * @map_id: id of the tid map
 *
 * Return: void
 */
static inline void cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc,
        struct cdp_vdev *vdev, uint8_t map_id)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map)
                return;
        soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(vdev,
                        map_id);
}

/**
 * cdp_set_pdev_dscp_tid_map(): function to change tid values in DSCP-tid map
 * @pdev: pdev handle
 * @map_id: id of the tid map
 * @tos: index value in map that needs to be changed
 * @tid: tid value passed by user
 *
 * Return: void
 */
static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev, uint8_t map_id, uint8_t tos, uint8_t tid)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map)
                return;
        soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(pdev,
                        map_id, tos, tid);
}

/**
 * cdp_flush_cache_rx_queue() - flush cache rx queue frame
 *
 * Return: None
 */
static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->flush_cache_rx_queue)
                return;
        soc->ops->cmn_drv_ops->flush_cache_rx_queue();
}

/**
 * cdp_txrx_stats(): function to map to host and firmware statistics
 * Deprecated, use cdp_txrx_stats_request() instead.
 * @soc: soc handle
 * @vdev: virtual device
 * @stats: statistics option
 *
 * return: status
 */
static inline
int cdp_txrx_stats(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        enum cdp_stats stats)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_stats)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_stats(vdev, stats);
}

/**
 * cdp_txrx_stats_request(): function to map to host and firmware statistics
 * @soc: soc handle
 * @vdev: virtual device
 * @req: stats request container
 *
 * return: status
 */
static inline
int cdp_txrx_stats_request(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        struct cdp_txrx_stats_req *req)
{
        if (!soc || !soc->ops || !soc->ops->cmn_drv_ops || !req) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_ASSERT(0);
                return 0;
        }
        if (soc->ops->cmn_drv_ops->txrx_stats_request)
                return soc->ops->cmn_drv_ops->txrx_stats_request(vdev, req);
        return 0;
}
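
/*
 * Illustrative call sequence (the request-field and enum names below are
 * assumptions; the authoritative definitions live in cdp_txrx_cmn_struct.h):
 *
 *     struct cdp_txrx_stats_req req = {0};
 *
 *     req.stats = CDP_TXRX_STATS_1;
 *     cdp_txrx_stats_request(soc, vdev, &req);
 */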

/**
 * cdp_txrx_intr_attach(): function to attach and configure interrupt
 * @soc: soc handle
 */
static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_intr_attach)
                return 0;
        return soc->ops->cmn_drv_ops->txrx_intr_attach(soc);
}

/**
 * cdp_txrx_intr_detach(): function to detach interrupt
 * @soc: soc handle
 */
static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_intr_detach)
                return;
        soc->ops->cmn_drv_ops->txrx_intr_detach(soc);
}

/**
 * cdp_display_stats(): function to map to dump stats
 * @soc: soc handle
 * @value: statistics option
 */
static inline QDF_STATUS
cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value,
        enum qdf_stats_verbosity_level level)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->display_stats)
                return 0;
        return soc->ops->cmn_drv_ops->display_stats(soc, value, level);
}

/**
 * cdp_set_pn_check(): function to set pn check
 * @soc: soc handle
 * @vdev: virtual device
 * @peer_handle: peer handle
 * @sec_type: security type
 * @rx_pn: receive pn
 */
static inline int cdp_set_pn_check(ol_txrx_soc_handle soc,
        struct cdp_vdev *vdev, struct cdp_peer *peer_handle,
        enum cdp_sec_type sec_type, uint32_t *rx_pn)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_pn_check)
                return 0;
        soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle,
                        sec_type, rx_pn);
        return 0;
}

/**
 * cdp_update_config_parameters(): function to propagate configuration
 * parameters to datapath
 * @soc: opaque soc handle
 * @cfg: configuration handle
 *
 * Return: status: 0 - Success, non-zero: Failure
 */
static inline
QDF_STATUS cdp_update_config_parameters(ol_txrx_soc_handle soc,
        struct cdp_config_params *cfg)
{
        struct cdp_soc *psoc = (struct cdp_soc *)soc;

        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->update_config_parameters)
                return QDF_STATUS_SUCCESS;
        return soc->ops->cmn_drv_ops->update_config_parameters(psoc,
                        cfg);
}

/**
 * cdp_pdev_get_dp_txrx_handle() - get advanced dp handle from pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 * Return: opaque dp handle
 */
static inline void *
cdp_pdev_get_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }
        if (soc->ops->cmn_drv_ops->get_dp_txrx_handle)
                return soc->ops->cmn_drv_ops->get_dp_txrx_handle(pdev);
        return 0;
}

/**
 * cdp_pdev_set_dp_txrx_handle() - set advanced dp handle in pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @dp_hdl: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static inline void
cdp_pdev_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev, void *dp_hdl)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_dp_txrx_handle)
                return;
        soc->ops->cmn_drv_ops->set_dp_txrx_handle(pdev, dp_hdl);
}

/*
 * cdp_soc_get_dp_txrx_handle() - get extended dp handle from soc
 * @soc: opaque soc handle
 *
 * Return: opaque extended dp handle
 */
static inline void *
cdp_soc_get_dp_txrx_handle(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }
        if (soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle)
                return soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle(
                                (struct cdp_soc *)soc);
        return NULL;
}

/**
 * cdp_soc_set_dp_txrx_handle() - set advanced dp handle in soc
 * @soc: opaque soc handle
 * @dp_handle: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static inline void
cdp_soc_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *dp_handle)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle)
                return;
        soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle((struct cdp_soc *)soc,
                        dp_handle);
}

/**
 * cdp_tx_send() - enqueue frame for transmission
 * @soc: soc opaque handle
 * @vdev: VAP device
 * @nbuf: nbuf to be enqueued
 *
 * This API is used by Extended Datapath modules to enqueue frame for
 * transmission
 *
 * Return: void
 */
static inline void
cdp_tx_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, qdf_nbuf_t nbuf)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }
        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->tx_send)
                return;
        soc->ops->cmn_drv_ops->tx_send(vdev, nbuf);
}
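
/*
 * Illustrative usage: an extended data path module that already holds a
 * soc handle, a vdev handle and a prepared nbuf enqueues the frame with
 *
 *     cdp_tx_send(soc, vdev, nbuf);
 */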

/*
 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 * Return: pdev_id
 */
static inline
uint8_t cdp_get_pdev_id_frm_pdev(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev)
{
        if (soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev)
                return soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev(pdev);
        return 0;
}

/**
 * cdp_set_nac() - set nac
 * @soc: opaque soc handle
 * @peer: data path peer handle
 *
 */
static inline
void cdp_set_nac(ol_txrx_soc_handle soc,
        struct cdp_peer *peer)
{
        if (soc->ops->cmn_drv_ops->txrx_set_nac)
                soc->ops->cmn_drv_ops->txrx_set_nac(peer);
}

/**
 * cdp_set_pdev_tx_capture() - set pdev tx_capture
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @val: value of pdev_tx_capture
 *
 * Return: void
 */
static inline
void cdp_set_pdev_tx_capture(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev, int val)
{
        if (soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture)
                return soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture(pdev,
                                val);
}

/**
 * cdp_get_peer_mac_from_peer_id() - get peer mac addr from peer id
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @peer_id: data path peer id
 * @peer_mac: peer_mac
 *
 * Return: void
 */
static inline
void cdp_get_peer_mac_from_peer_id(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev_handle,
        uint32_t peer_id, uint8_t *peer_mac)
{
        if (soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id)
                soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id(
                                pdev_handle, peer_id, peer_mac);
}

/**
 * cdp_vdev_tx_lock() - acquire lock
 * @soc: opaque soc handle
 * @vdev: data path vdev handle
 *
 * Return: void
 */
static inline
void cdp_vdev_tx_lock(ol_txrx_soc_handle soc,
        struct cdp_vdev *vdev)
{
        if (soc->ops->cmn_drv_ops->txrx_vdev_tx_lock)
                soc->ops->cmn_drv_ops->txrx_vdev_tx_lock(vdev);
}

/**
 * cdp_vdev_tx_unlock() - release lock
 * @soc: opaque soc handle
 * @vdev: data path vdev handle
 *
 * Return: void
 */
static inline
void cdp_vdev_tx_unlock(ol_txrx_soc_handle soc,
        struct cdp_vdev *vdev)
{
        if (soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock)
                soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock(vdev);
}

/**
 * cdp_ath_getstats() - get updated athstats
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @net_device_stats: interface stats
 * @rtnl_link_stats64: device statistics structure
 *
 * Return: void
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
static inline void cdp_ath_getstats(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev, struct net_device_stats *stats)
#else
static inline void cdp_ath_getstats(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev, struct rtnl_link_stats64 *stats)
#endif
{
        if (soc && soc->ops && soc->ops->cmn_drv_ops->txrx_ath_getstats)
                soc->ops->cmn_drv_ops->txrx_ath_getstats(pdev, stats);
}

/**
 * cdp_set_gid_flag() - set groupid flag
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @mem_status: member status from grp management frame
 * @user_position: user position from grp management frame
 *
 * Return: void
 */
static inline
void cdp_set_gid_flag(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev, u_int8_t *mem_status,
        u_int8_t *user_position)
{
        if (soc->ops->cmn_drv_ops->txrx_set_gid_flag)
                soc->ops->cmn_drv_ops->txrx_set_gid_flag(pdev, mem_status,
                                user_position);
}

/**
 * cdp_fw_supported_enh_stats_version() - returns the fw enhanced stats version
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 */
static inline
uint32_t cdp_fw_supported_enh_stats_version(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev)
{
        if (soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version)
                return soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version(pdev);
        return 0;
}

/**
 * cdp_if_mgmt_drain() - drain management frames queued for a node
 * @soc: opaque soc handle
 * @ni: associated node
 * @force: number of frame in SW queue
 *
 * Return: void
 */
static inline
void cdp_if_mgmt_drain(ol_txrx_soc_handle soc,
        void *ni, int force)
{
        if (soc->ops->cmn_drv_ops->txrx_if_mgmt_drain)
                soc->ops->cmn_drv_ops->txrx_if_mgmt_drain(ni, force);
}

#endif /* _CDP_TXRX_CMN_H_ */