/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file cdp_txrx_ctrl.h
 * @brief Define the host data path control API functions
 * called by the host control SW and the OS interface module
 */

#ifndef _CDP_TXRX_CTRL_H_
#define _CDP_TXRX_CTRL_H_

#include "cdp_txrx_handle.h"

static inline int cdp_is_target_ar900b
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_is_target_ar900b)
		return 0;

	return soc->ops->ctrl_ops->txrx_is_target_ar900b(vdev);
}

/* WIN */
static inline int
cdp_mempools_attach(ol_txrx_soc_handle soc, void *ctrl_pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_mempools_attach)
		return 0;

	return soc->ops->ctrl_ops->txrx_mempools_attach(ctrl_pdev);
}

/**
 * @brief set filter neighbour peers
 * @details
 * This defines the interface function to enable or disable neighbour peer
 * filtering.
 *
 * @param soc - the pointer to soc object
 * @param pdev - the pointer to the physical device object
 * @param val - the enable/disable value
 * @return - int
 */
static inline int
cdp_set_filter_neighbour_peers(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, u_int32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_filter_neighbour_peers)
		return 0;

	return soc->ops->ctrl_ops->txrx_set_filter_neighbour_peers
			(pdev, val);
}

/**
 * @brief update the neighbour peer addresses
 * @details
 * This defines the interface function to add or delete the neighbour peer
 * addresses that need to be filtered.
 *
 * @param soc - the pointer to soc object
 * @param pdev - the pointer to the physical device object
 * @param cmd - add/del entry into peer table
 * @param macaddr - the address of the neighbour peer
 * @return - int
 */
static inline int
cdp_update_filter_neighbour_peers(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, uint32_t cmd, uint8_t *macaddr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_update_filter_neighbour_peers)
		return 0;

	return soc->ops->ctrl_ops->txrx_update_filter_neighbour_peers
			(pdev, cmd, macaddr);
}
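
/*
 * Illustrative usage sketch (not part of the API): neighbour peer filtering
 * is typically enabled first and then individual MAC addresses are added.
 * The soc/pdev handles and neighbour_mac are assumed to have been obtained
 * elsewhere, and the add command value of 1 is an assumption for the sake
 * of the example.
 *
 *	uint8_t neighbour_mac[6];
 *
 *	cdp_set_filter_neighbour_peers(soc, pdev, 1);
 *	cdp_update_filter_neighbour_peers(soc, pdev, 1, neighbour_mac);
 */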

/**
 * @brief set the safemode of the device
 * @details
 * This flag is used to bypass the encrypt and decrypt processes when sending
 * and receiving packets. It works like open AUTH mode; HW will treat all
 * packets as non-encrypted frames because no key is installed. For rx
 * fragmented frames, it bypasses all the rx defragmentation.
 *
 * @param vdev - the data virtual device object
 * @param val - the safemode state
 * @return - void
 */
static inline void
cdp_set_safemode(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, u_int32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_safemode)
		return;

	soc->ops->ctrl_ops->txrx_set_safemode(vdev, val);
}

/**
 * @brief configure the drop unencrypted frame flag
 * @details
 * Rx related. When this flag is set, all the unencrypted frames
 * received over a secure connection will be discarded.
 *
 * @param vdev - the data virtual device object
 * @param val - flag
 * @return - void
 */
static inline void
cdp_set_drop_unenc(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, u_int32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_drop_unenc)
		return;

	soc->ops->ctrl_ops->txrx_set_drop_unenc(vdev, val);
}
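
/*
 * Illustrative usage sketch (not part of the API): one possible sequence,
 * shown purely as an assumption, is to enable safemode while no key is
 * installed (open-AUTH-like behaviour) and to drop unencrypted frames only
 * once the connection is secured. The vdev handle is assumed to come from
 * the attach path.
 *
 *	cdp_set_safemode(soc, vdev, 1);
 *	cdp_set_drop_unenc(soc, vdev, 0);
 *
 *	... after keys are installed ...
 *
 *	cdp_set_safemode(soc, vdev, 0);
 *	cdp_set_drop_unenc(soc, vdev, 1);
 */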

/**
 * @brief set the Tx encapsulation type of the VDEV
 * @details
 * This will be used to populate the HTT desc packet type field during Tx
 *
 * @param vdev - the data virtual device object
 * @param val - the Tx encap type (htt_cmn_pkt_type)
 * @return - void
 */
static inline void
cdp_set_tx_encap_type(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, enum htt_cmn_pkt_type val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_tx_encap_type)
		return;

	soc->ops->ctrl_ops->txrx_set_tx_encap_type(vdev, val);
}

/**
 * @brief set the Rx decapsulation type of the VDEV
 * @details
 * This will be used to configure into firmware and hardware which format to
 * decap all Rx packets into, for all peers under the VDEV.
 *
 * @param vdev - the data virtual device object
 * @param val - the Rx decap mode (htt_cmn_pkt_type)
 * @return - void
 */
static inline void
cdp_set_vdev_rx_decap_type(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, enum htt_cmn_pkt_type val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_vdev_rx_decap_type)
		return;

	soc->ops->ctrl_ops->txrx_set_vdev_rx_decap_type
			(vdev, val);
}

/**
 * @brief get the Rx decapsulation type of the VDEV
 *
 * @param vdev - the data virtual device object
 * @return - the Rx decap type (htt_cmn_pkt_type)
 */
static inline enum htt_cmn_pkt_type
cdp_get_vdev_rx_decap_type(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_get_vdev_rx_decap_type)
		return 0;

	return soc->ops->ctrl_ops->txrx_get_vdev_rx_decap_type(vdev);
}
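
/*
 * Illustrative usage sketch (not part of the API): the Tx encapsulation and
 * Rx decapsulation formats are usually configured together for a vdev. The
 * enumerator name below follows the htt_cmn_pkt_type naming from the HTT
 * headers and is shown as an assumption.
 *
 *	cdp_set_tx_encap_type(soc, vdev, htt_cmn_pkt_type_ethernet);
 *	cdp_set_vdev_rx_decap_type(soc, vdev, htt_cmn_pkt_type_ethernet);
 *
 *	if (cdp_get_vdev_rx_decap_type(soc, vdev) != htt_cmn_pkt_type_ethernet)
 *		... handle the unsupported/failed configuration ...
 */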

/**
 * @brief set the Reo Destination ring for the pdev
 * @details
 * This will be used to configure the Reo Destination ring for this pdev.
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @param val - the Reo destination ring index (1 to 4)
 * @return - void
 */
static inline void
cdp_set_pdev_reo_dest(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, enum cdp_host_reo_dest_ring val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_pdev_reo_dest)
		return;

	soc->ops->ctrl_ops->txrx_set_pdev_reo_dest
			(pdev, val);
}

/**
 * @brief get the Reo Destination ring for the pdev
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @return - the Reo destination ring index (1 to 4), 0 if not supported.
 */
static inline enum cdp_host_reo_dest_ring
cdp_get_pdev_reo_dest(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return cdp_host_reo_dest_ring_unknown;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_get_pdev_reo_dest)
		return cdp_host_reo_dest_ring_unknown;

	return soc->ops->ctrl_ops->txrx_get_pdev_reo_dest(pdev);
}
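
/*
 * Illustrative usage sketch (not part of the API): a caller can read back
 * the current REO destination ring and reprogram it per pdev. The ring
 * enumerator below is assumed to follow the cdp_host_reo_dest_ring naming
 * used for the unknown value above.
 *
 *	enum cdp_host_reo_dest_ring ring;
 *
 *	ring = cdp_get_pdev_reo_dest(soc, pdev);
 *	if (ring == cdp_host_reo_dest_ring_unknown)
 *		cdp_set_pdev_reo_dest(soc, pdev, cdp_host_reo_dest_ring_1);
 */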

/* Is this similar to ol_txrx_peer_state_update() in MCL */
/**
 * @brief Update the authorization state of a peer object at association time
 * @details
 * For the host-based implementation of rate-control, it
 * updates the peer/node-related parameters within the rate-control
 * context of the peer at association.
 *
 * @param peer - pointer to the node's object
 * @param authorize - either to authorize or unauthorize peer
 *
 * @return none
 */
static inline void
cdp_peer_authorize(ol_txrx_soc_handle soc,
	struct cdp_peer *peer, u_int32_t authorize)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_peer_authorize)
		return;

	soc->ops->ctrl_ops->txrx_peer_authorize
			(peer, authorize);
}
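
/*
 * Illustrative usage sketch (not part of the API): a peer is typically
 * authorized once association/key exchange completes and unauthorized when
 * it leaves; the peer handle is assumed to have been looked up elsewhere.
 *
 *	cdp_peer_authorize(soc, peer, 1);
 *	...
 *	cdp_peer_authorize(soc, peer, 0);
 */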

/**
 * @brief configure the peer inactivity detection parameters
 *
 * @param soc - the pointer to soc object
 * @param pdev - the data physical device object
 * @param inact_check_interval - interval between inactivity checks
 * @param inact_normal - inactivity threshold under normal load
 * @param inact_overload - inactivity threshold when the radio is overloaded
 * @return - true on success, false otherwise
 */
static inline bool
cdp_set_inact_params(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
		     u_int16_t inact_check_interval,
		     u_int16_t inact_normal,
		     u_int16_t inact_overload)
{
	if (!soc || !pdev || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return false;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_inact_params)
		return false;

	return soc->ops->ctrl_ops->txrx_set_inact_params
			(pdev, inact_check_interval, inact_normal,
			 inact_overload);
}

/**
 * @brief start or stop the peer inactivity timer
 *
 * @param soc - the pointer to soc object
 * @param pdev - the data physical device object
 * @param enable - whether to start (true) or stop (false) the timer
 * @return - true on success, false otherwise
 */
static inline bool
cdp_start_inact_timer(ol_txrx_soc_handle soc,
		      struct cdp_pdev *pdev,
		      bool enable)
{
	if (!soc || !pdev || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return false;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_start_inact_timer)
		return false;

	return soc->ops->ctrl_ops->txrx_start_inact_timer
			(pdev, enable);
}

/**
 * @brief Set the overload status of the radio
 * @details
 * Set the overload status of the radio, updating the inactivity
 * threshold and inactivity count for each node.
 *
 * @param pdev - the data physical device object
 * @param overload - whether the radio is overloaded or not
 */
static inline void
cdp_set_overload(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
	bool overload)
{
	if (!soc || !pdev || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_overload)
		return;

	soc->ops->ctrl_ops->txrx_set_overload(pdev, overload);
}

/**
 * @brief Check the inactivity status of the peer/node
 *
 * @param peer - pointer to the node's object
 * @return true if the node is inactive; otherwise return false
 */
static inline bool
cdp_peer_is_inact(ol_txrx_soc_handle soc, void *peer)
{
	if (!soc || !peer || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return false;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_peer_is_inact)
		return false;

	return soc->ops->ctrl_ops->txrx_peer_is_inact(peer);
}

/**
 * @brief Mark inactivity status of the peer/node
 * @details
 * If it becomes active, reset inactivity count to reload value;
 * if the inactivity status changed, notify umac band steering.
 *
 * @param peer - pointer to the node's object
 * @param inactive - whether the node is inactive or not
 */
static inline void
cdp_mark_peer_inact(ol_txrx_soc_handle soc,
	void *peer,
	bool inactive)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_mark_peer_inact)
		return;

	soc->ops->ctrl_ops->txrx_mark_peer_inact
			(peer, inactive);
}
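
/*
 * Illustrative usage sketch (not part of the API): the inactivity machinery
 * above is normally driven as one sequence - configure the thresholds, start
 * the timer, switch thresholds when the radio becomes overloaded, and poll
 * individual peers. The numeric thresholds below are placeholders, not
 * recommended values.
 *
 *	if (cdp_set_inact_params(soc, pdev, 10, 20, 5))
 *		cdp_start_inact_timer(soc, pdev, true);
 *
 *	cdp_set_overload(soc, pdev, true);
 *
 *	if (cdp_peer_is_inact(soc, peer))
 *		... e.g. notify band steering ...
 */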

/* Should be ol_txrx_ctrl_api.h */
static inline void cdp_set_mesh_mode
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, u_int32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_mesh_mode)
		return;

	soc->ops->ctrl_ops->txrx_set_mesh_mode(vdev, val);
}

/**
 * @brief set mesh rx filter
 * @details packets are dropped based on the bits enabled in the filter.
 *
 * @param soc - pointer to the soc
 * @param vdev - the data virtual device object
 * @param val - value to be set
 * @return - void
 */
static inline
void cdp_set_mesh_rx_filter(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, uint32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_mesh_rx_filter)
		return;

	soc->ops->ctrl_ops->txrx_set_mesh_rx_filter(vdev, val);
}

static inline void cdp_tx_flush_buffers
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->tx_flush_buffers)
		return;

	soc->ops->ctrl_ops->tx_flush_buffers(vdev);
}

static inline void cdp_txrx_set_vdev_param(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, enum cdp_vdev_param_type type,
	uint32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_vdev_param)
		return;

	soc->ops->ctrl_ops->txrx_set_vdev_param(vdev, type, val);
}

static inline void
cdp_peer_set_nawds(ol_txrx_soc_handle soc,
	struct cdp_peer *peer, uint8_t value)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_peer_set_nawds)
		return;

	soc->ops->ctrl_ops->txrx_peer_set_nawds
			(peer, value);
}

static inline void cdp_txrx_set_pdev_param(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, enum cdp_pdev_param_type type,
	uint8_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_pdev_param)
		return;

	soc->ops->ctrl_ops->txrx_set_pdev_param
			(pdev, type, val);
}

/**
 * @brief Subscribe to a specified WDI event.
 * @details
 * This function adds the provided wdi_event_subscribe object to a list of
 * subscribers for the specified WDI event.
 * When the event in question happens, each subscriber for the event will
 * have their callback function invoked.
 * The order in which callback functions from multiple subscribers are
 * invoked is unspecified.
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @param event_cb_sub - the callback and context for the event subscriber
 * @param event - which event's notifications are being subscribed to
 * @return - int
 */
static inline int
cdp_wdi_event_sub(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, void *event_cb_sub, uint32_t event)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s invalid instance", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_wdi_event_sub)
		return 0;

	return soc->ops->ctrl_ops->txrx_wdi_event_sub
			(pdev, event_cb_sub, event);
}

/**
 * @brief Unsubscribe from a specified WDI event.
 * @details
 * This function removes the provided event subscription object from the
 * list of subscribers for its event.
 * This function shall only be called if there was a successful prior call
 * to event_sub() on the same wdi_event_subscribe object.
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @param event_cb_sub - the callback and context for the event subscriber
 * @param event - which event's notifications are being unsubscribed from
 * @return - int
 */
static inline int
cdp_wdi_event_unsub(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, void *event_cb_sub, uint32_t event)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s invalid instance", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_wdi_event_unsub)
		return 0;

	return soc->ops->ctrl_ops->txrx_wdi_event_unsub
			(pdev, event_cb_sub, event);
}
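
/*
 * Illustrative usage sketch (not part of the API): a subscriber fills in a
 * wdi_event_subscribe object (callback and context; the field names are an
 * assumption, see wdi_event_api.h) and registers it for one event id; the
 * same object must later be passed to the unsubscribe call. The event id
 * and the my_rx_event_cb/my_ctx names are hypothetical caller-side choices.
 *
 *	static struct wdi_event_subscribe rx_sub;
 *
 *	rx_sub.callback = my_rx_event_cb;
 *	rx_sub.context = my_ctx;
 *	cdp_wdi_event_sub(soc, pdev, &rx_sub, WDI_EVENT_RX_DESC);
 *	...
 *	cdp_wdi_event_unsub(soc, pdev, &rx_sub, WDI_EVENT_RX_DESC);
 */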

/**
 * @brief Get the security type from the peer.
 * @details
 * This function gets the security information from the peer handle.
 * The security information is taken from the rx descriptor and filled in
 * to the peer handle.
 *
 * @param soc - pointer to the soc
 * @param peer - peer handle
 * @param sec_idx - mcast or ucast frame type.
 * @return - int
 */
static inline int
cdp_get_sec_type(ol_txrx_soc_handle soc, struct cdp_peer *peer, uint8_t sec_idx)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s invalid instance", __func__);
		QDF_BUG(0);
		return A_ERROR;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_get_sec_type)
		return A_ERROR;

	return soc->ops->ctrl_ops->txrx_get_sec_type
			(peer, sec_idx);
}

/**
 * cdp_set_mgmt_tx_power(): function to set tx power for mgmt frames
 * @soc: soc handle
 * @vdev: vdev handle
 * @subtype: mgmt frame subtype
 * @tx_power: Tx power
 * Return: 0
 */
static inline int cdp_set_mgmt_tx_power(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, uint8_t subtype, uint8_t tx_power)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_update_mgmt_txpow_vdev)
		return 0;

	soc->ops->ctrl_ops->txrx_update_mgmt_txpow_vdev(vdev,
							subtype, tx_power);
	return 0;
}

#endif