/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file cdp_txrx_ctrl.h
 * @brief Define the host data path control API functions
 * called by the host control SW and the OS interface module
 */

#ifndef _CDP_TXRX_CTRL_H_
#define _CDP_TXRX_CTRL_H_

#include "cdp_txrx_handle.h"
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_ops.h"

static inline int cdp_is_target_ar900b
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_is_target_ar900b)
		return 0;

	return soc->ops->ctrl_ops->txrx_is_target_ar900b(vdev);
}

/* WIN */
static inline int
cdp_mempools_attach(ol_txrx_soc_handle soc, void *ctrl_pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_mempools_attach)
		return 0;

	return soc->ops->ctrl_ops->txrx_mempools_attach(ctrl_pdev);
}

/**
 * @brief set filter neighbour peers
 * @details
 * This defines the interface function to set neighbour peer filtering.
 *
 * @param soc - the pointer to soc object
 * @param pdev - the pointer to physical device object
 * @param val - the enable/disable value
 * @return - int
 */
static inline int
cdp_set_filter_neighbour_peers(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, u_int32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_filter_neighbour_peers)
		return 0;

	return soc->ops->ctrl_ops->txrx_set_filter_neighbour_peers
			(pdev, val);
}

/**
 * @brief update the neighbour peer addresses
 * @details
 * This defines the interface function to update the neighbour peer
 * addresses which need to be filtered.
 *
 * @param soc - the pointer to soc object
 * @param pdev - the pointer to physical device object
 * @param cmd - add/del entry into peer table
 * @param macaddr - the address of neighbour peer
 * @return - int
 */
static inline int
cdp_update_filter_neighbour_peers(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, uint32_t cmd, uint8_t *macaddr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_update_filter_neighbour_peers)
		return 0;

	return soc->ops->ctrl_ops->txrx_update_filter_neighbour_peers
			(pdev, cmd, macaddr);
}
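
/*
 * Example (illustrative only): a minimal sketch of how a caller might
 * enable neighbour (non-associated) peer filtering on a radio and then
 * register one neighbour MAC address. The names soc, pdev and nbr_mac
 * are placeholders, and NAC_PARAM_ADD stands in for whatever "add"
 * command value the caller's control layer defines; none of these are
 * provided by this header.
 *
 *	uint8_t nbr_mac[QDF_MAC_ADDR_SIZE] = {
 *		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	cdp_set_filter_neighbour_peers(soc, pdev, 1);
 *	cdp_update_filter_neighbour_peers(soc, pdev, NAC_PARAM_ADD, nbr_mac);
 */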

/**
 * @brief set the safemode of the device
 * @details
 * This flag is used to bypass the encrypt and decrypt processes when
 * sending and receiving packets. It works like open AUTH mode: HW will
 * treat all packets as non-encrypted frames because no key is installed.
 * For rx fragmented frames, it bypasses all the rx defragmentation.
 *
 * @param vdev - the data virtual device object
 * @param val - the safemode state
 * @return - void
 */
static inline void
cdp_set_safemode(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, u_int32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_safemode)
		return;

	soc->ops->ctrl_ops->txrx_set_safemode(vdev, val);
}

/**
 * @brief configure the drop unencrypted frame flag
 * @details
 * Rx related. When this flag is set, all the unencrypted frames
 * received over a secure connection will be discarded.
 *
 * @param vdev - the data virtual device object
 * @param val - flag
 * @return - void
 */
static inline void
cdp_set_drop_unenc(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, u_int32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_drop_unenc)
		return;

	soc->ops->ctrl_ops->txrx_set_drop_unenc(vdev, val);
}

/**
 * @brief set the Tx encapsulation type of the VDEV
 * @details
 * This will be used to populate the HTT desc packet type field during Tx.
 *
 * @param vdev - the data virtual device object
 * @param val - the Tx encap type (htt_cmn_pkt_type)
 * @return - void
 */
static inline void
cdp_set_tx_encap_type(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, enum htt_cmn_pkt_type val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_tx_encap_type)
		return;

	soc->ops->ctrl_ops->txrx_set_tx_encap_type(vdev, val);
}

/**
 * @brief set the Rx decapsulation type of the VDEV
 * @details
 * This will be used to configure firmware and hardware with the format
 * into which all Rx packets are decapped, for all peers under the VDEV.
 *
 * @param vdev - the data virtual device object
 * @param val - the Rx decap mode (htt_cmn_pkt_type)
 * @return - void
 */
static inline void
cdp_set_vdev_rx_decap_type(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, enum htt_cmn_pkt_type val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_vdev_rx_decap_type)
		return;

	soc->ops->ctrl_ops->txrx_set_vdev_rx_decap_type
			(vdev, val);
}

/**
 * @brief get the Rx decapsulation type of the VDEV
 *
 * @param vdev - the data virtual device object
 * @return - the Rx decap type (htt_cmn_pkt_type)
 */
static inline enum htt_cmn_pkt_type
cdp_get_vdev_rx_decap_type(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_get_vdev_rx_decap_type)
		return 0;

	return soc->ops->ctrl_ops->txrx_get_vdev_rx_decap_type(vdev);
}
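
/*
 * Example (illustrative only): a minimal sketch that configures a VDEV
 * for 802.3 (Ethernet) framing on both Tx and Rx and reads the setting
 * back. soc and vdev are placeholders, and htt_cmn_pkt_type_ethernet is
 * the assumed enumerator for Ethernet framing in enum htt_cmn_pkt_type;
 * use whichever encap/decap mode the target actually supports.
 *
 *	cdp_set_tx_encap_type(soc, vdev, htt_cmn_pkt_type_ethernet);
 *	cdp_set_vdev_rx_decap_type(soc, vdev, htt_cmn_pkt_type_ethernet);
 *
 *	if (cdp_get_vdev_rx_decap_type(soc, vdev) !=
 *	    htt_cmn_pkt_type_ethernet)
 *		qdf_print("%s: Rx decap type not applied\n", __func__);
 */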

/**
 * @brief set the Reo Destination ring for the pdev
 * @details
 * This will be used to configure the Reo Destination ring for this pdev.
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @param val - the Reo destination ring index (1 to 4)
 * @return - void
 */
static inline void
cdp_set_pdev_reo_dest(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, enum cdp_host_reo_dest_ring val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_pdev_reo_dest)
		return;

	soc->ops->ctrl_ops->txrx_set_pdev_reo_dest
			(pdev, val);
}

/**
 * @brief get the Reo Destination ring for the pdev
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @return - the Reo destination ring index (1 to 4), or
 *	cdp_host_reo_dest_ring_unknown if not supported.
 */
static inline enum cdp_host_reo_dest_ring
cdp_get_pdev_reo_dest(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return cdp_host_reo_dest_ring_unknown;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_get_pdev_reo_dest)
		return cdp_host_reo_dest_ring_unknown;

	return soc->ops->ctrl_ops->txrx_get_pdev_reo_dest(pdev);
}
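
/*
 * Example (illustrative only): steer Rx for a pdev to REO destination
 * ring 2 and confirm that a valid setting is in place. soc and pdev are
 * placeholders, and cdp_host_reo_dest_ring_2 is the assumed enumerator
 * for ring index 2 in enum cdp_host_reo_dest_ring.
 *
 *	cdp_set_pdev_reo_dest(soc, pdev, cdp_host_reo_dest_ring_2);
 *
 *	if (cdp_get_pdev_reo_dest(soc, pdev) ==
 *	    cdp_host_reo_dest_ring_unknown)
 *		qdf_print("%s: REO destination not configured\n", __func__);
 */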

/* Is this similar to ol_txrx_peer_state_update() in MCL */
/**
 * @brief Update the authorize peer object at association time
 * @details
 * For the host-based implementation of rate-control, it
 * updates the peer/node-related parameters within rate-control
 * context of the peer at association.
 *
 * @param peer - pointer to the node's object
 * @param authorize - either to authorize or unauthorize peer
 *
 * @return none
 */
static inline void
cdp_peer_authorize(ol_txrx_soc_handle soc,
	struct cdp_peer *peer, u_int32_t authorize)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_peer_authorize)
		return;

	soc->ops->ctrl_ops->txrx_peer_authorize
			(peer, authorize);
}

/**
 * @brief Set the peer inactivity detection parameters
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @param inact_check_interval - inactivity check interval
 * @param inact_normal - inactivity threshold for normal load
 * @param inact_overload - inactivity threshold when the radio is overloaded
 * @return - true on success, false otherwise
 */
static inline bool
cdp_set_inact_params(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
	u_int16_t inact_check_interval,
	u_int16_t inact_normal,
	u_int16_t inact_overload)
{
	if (!soc || !pdev || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return false;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_inact_params)
		return false;

	return soc->ops->ctrl_ops->txrx_set_inact_params
			(pdev, inact_check_interval, inact_normal,
			inact_overload);
}

/**
 * @brief Start or stop the peer inactivity timer
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @param enable - true to start the timer, false to stop it
 * @return - true on success, false otherwise
 */
static inline bool
cdp_start_inact_timer(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev,
	bool enable)
{
	if (!soc || !pdev || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return false;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_start_inact_timer)
		return false;

	return soc->ops->ctrl_ops->txrx_start_inact_timer
			(pdev, enable);
}

/**
 * @brief Set the overload status of the radio
 * @details
 * Set the overload status of the radio, updating the inactivity
 * threshold and inactivity count for each node.
 *
 * @param pdev - the data physical device object
 * @param overload - whether the radio is overloaded or not
 */
static inline void
cdp_set_overload(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
	bool overload)
{
	if (!soc || !pdev || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_overload)
		return;

	soc->ops->ctrl_ops->txrx_set_overload(pdev, overload);
}

/**
 * @brief Check the inactivity status of the peer/node
 *
 * @param peer - pointer to the node's object
 * @return true if the node is inactive; otherwise return false
 */
static inline bool
cdp_peer_is_inact(ol_txrx_soc_handle soc, void *peer)
{
	if (!soc || !peer || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return false;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_peer_is_inact)
		return false;

	return soc->ops->ctrl_ops->txrx_peer_is_inact(peer);
}

/**
 * @brief Mark inactivity status of the peer/node
 * @details
 * If it becomes active, reset inactivity count to reload value;
 * if the inactivity status changed, notify umac band steering.
 *
 * @param peer - pointer to the node's object
 * @param inactive - whether the node is inactive or not
 */
static inline void
cdp_mark_peer_inact(ol_txrx_soc_handle soc,
	void *peer,
	bool inactive)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_mark_peer_inact)
		return;

	soc->ops->ctrl_ops->txrx_mark_peer_inact
			(peer, inactive);
}
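
/*
 * Example (illustrative only): a minimal sketch of enabling inactivity
 * detection on a radio and later querying a peer. soc, pdev and peer
 * are placeholders, and the numeric parameters are arbitrary values
 * chosen for illustration, not recommended settings.
 *
 *	if (cdp_set_inact_params(soc, pdev, 5, 10, 3))
 *		cdp_start_inact_timer(soc, pdev, true);
 *
 *	...
 *
 *	if (cdp_peer_is_inact(soc, peer))
 *		qdf_print("%s: peer is inactive\n", __func__);
 */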

/* Should be ol_txrx_ctrl_api.h */
/**
 * @brief set mesh mode on the vdev
 *
 * @param soc - pointer to the soc
 * @param vdev - the data virtual device object
 * @param val - the mesh mode value to set
 * @return - void
 */
static inline void cdp_set_mesh_mode
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, u_int32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_mesh_mode)
		return;

	soc->ops->ctrl_ops->txrx_set_mesh_mode(vdev, val);
}

/**
 * @brief set mesh rx filter
 * @details based on the bits enabled in the filter, packets have to be dropped.
 *
 * @param soc - pointer to the soc
 * @param vdev - the data virtual device object
 * @param val - value to be set
 * @return - void
 */
static inline
void cdp_set_mesh_rx_filter(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, uint32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_mesh_rx_filter)
		return;

	soc->ops->ctrl_ops->txrx_set_mesh_rx_filter(vdev, val);
}

/**
 * @brief flush the tx buffers queued on the vdev
 *
 * @param soc - pointer to the soc
 * @param vdev - the data virtual device object
 * @return - void
 */
static inline void cdp_tx_flush_buffers
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->tx_flush_buffers)
		return;

	soc->ops->ctrl_ops->tx_flush_buffers(vdev);
}

/**
 * @brief set a per-vdev data path parameter
 *
 * @param soc - pointer to the soc
 * @param vdev - the data virtual device object
 * @param type - the parameter to set (cdp_vdev_param_type)
 * @param val - the parameter value
 * @return - void
 */
static inline void cdp_txrx_set_vdev_param(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, enum cdp_vdev_param_type type,
	uint32_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_vdev_param)
		return;

	soc->ops->ctrl_ops->txrx_set_vdev_param(vdev, type, val);
}

/**
 * @brief set the NAWDS value for the peer
 *
 * @param soc - pointer to the soc
 * @param peer - pointer to the peer object
 * @param value - the NAWDS enable/disable value
 * @return - void
 */
static inline void
cdp_peer_set_nawds(ol_txrx_soc_handle soc,
	struct cdp_peer *peer, uint8_t value)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_peer_set_nawds)
		return;

	soc->ops->ctrl_ops->txrx_peer_set_nawds
			(peer, value);
}

/**
 * @brief set a per-pdev data path parameter
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @param type - the parameter to set (cdp_pdev_param_type)
 * @param val - the parameter value
 * @return - void
 */
static inline void cdp_txrx_set_pdev_param(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, enum cdp_pdev_param_type type,
	uint8_t val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_set_pdev_param)
		return;

	soc->ops->ctrl_ops->txrx_set_pdev_param
			(pdev, type, val);
}
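
/*
 * Example (illustrative only): a minimal sketch of setting per-vdev and
 * per-pdev parameters through the common API. soc, vdev and pdev are
 * placeholders, and CDP_ENABLE_WDS / CDP_CONFIG_DEBUG_SNIFFER are
 * assumed enumerator names in cdp_vdev_param_type / cdp_pdev_param_type;
 * substitute whichever parameters the target data path actually defines.
 *
 *	cdp_txrx_set_vdev_param(soc, vdev, CDP_ENABLE_WDS, 1);
 *	cdp_txrx_set_pdev_param(soc, pdev, CDP_CONFIG_DEBUG_SNIFFER, 0);
 */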

/**
 * @brief Subscribe to a specified WDI event.
 * @details
 * This function adds the provided wdi_event_subscribe object to a list of
 * subscribers for the specified WDI event.
 * When the event in question happens, each subscriber for the event will
 * have their callback function invoked.
 * The order in which callback functions from multiple subscribers are
 * invoked is unspecified.
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @param event_cb_sub - the callback and context for the event subscriber
 * @param event - which event's notifications are being subscribed to
 * @return - int
 */
static inline int
cdp_wdi_event_sub(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, void *event_cb_sub, uint32_t event)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s invalid instance", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_wdi_event_sub)
		return 0;

	return soc->ops->ctrl_ops->txrx_wdi_event_sub
			(pdev, event_cb_sub, event);
}

/**
 * @brief Unsubscribe from a specified WDI event.
 * @details
 * This function removes the provided event subscription object from the
 * list of subscribers for its event.
 * This function shall only be called if there was a successful prior call
 * to event_sub() on the same wdi_event_subscribe object.
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @param event_cb_sub - the subscription object used in the prior event_sub() call
 * @param event - which event's notifications are being unsubscribed from
 * @return - int
 */
static inline int
cdp_wdi_event_unsub(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, void *event_cb_sub, uint32_t event)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s invalid instance", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_wdi_event_unsub)
		return 0;

	return soc->ops->ctrl_ops->txrx_wdi_event_unsub
			(pdev, event_cb_sub, event);
}
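
/*
 * Example (illustrative only): a minimal sketch of subscribing to and
 * later unsubscribing from a WDI event. The subscriber object layout
 * (a callback plus an opaque context) and the names rx_stats_sub,
 * rx_stats_cb, my_ctx and WDI_EVENT_RX_DESC are assumptions made for
 * illustration; consult the wdi_event definitions for the actual types
 * and event IDs.
 *
 *	static struct wdi_event_subscribe rx_stats_sub;
 *
 *	rx_stats_sub.callback = rx_stats_cb;
 *	rx_stats_sub.context = my_ctx;
 *	cdp_wdi_event_sub(soc, pdev, &rx_stats_sub, WDI_EVENT_RX_DESC);
 *
 *	...
 *
 *	cdp_wdi_event_unsub(soc, pdev, &rx_stats_sub, WDI_EVENT_RX_DESC);
 */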

/**
 * @brief Get the security type from the peer.
 * @details
 * This function gets the security information from the peer handler.
 * The security information is obtained from the rx descriptor and filled
 * into the peer handler.
 *
 * @param soc - pointer to the soc
 * @param peer - peer handler
 * @param sec_idx - mcast or ucast frame type.
 * @return - int
 */
static inline int
cdp_get_sec_type(ol_txrx_soc_handle soc, struct cdp_peer *peer, uint8_t sec_idx)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s invalid instance", __func__);
		QDF_BUG(0);
		return A_ERROR;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_get_sec_type)
		return A_ERROR;

	return soc->ops->ctrl_ops->txrx_get_sec_type
			(peer, sec_idx);
}

/**
 * cdp_set_mgmt_tx_power(): function to set tx power for mgmt frames
 * @soc: soc pointer
 * @vdev: vdev handle
 * @subtype: management frame subtype
 * @tx_power: Tx power
 * Return: 0
 */
static inline int cdp_set_mgmt_tx_power(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, uint8_t subtype, uint8_t tx_power)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_update_mgmt_txpow_vdev)
		return 0;

	soc->ops->ctrl_ops->txrx_update_mgmt_txpow_vdev(vdev,
			subtype, tx_power);
	return 0;
}

/**
 * @brief get the pldev handle attached to the pdev
 *
 * @param soc - pointer to the soc
 * @param pdev - the data physical device object
 * @return - the pldev handle, NULL if not available
 */
static inline void *
cdp_get_pldev(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->ctrl_ops || !soc->ops->ctrl_ops->txrx_get_pldev)
		return NULL;

	return soc->ops->ctrl_ops->txrx_get_pldev(pdev);
}

#ifdef ATH_SUPPORT_NAC_RSSI
/**
 * cdp_vdev_config_for_nac_rssi(): To invoke dp callback for nac rssi config
 * @soc: soc pointer
 * @vdev: vdev pointer
 * @nac_cmd: specifies the nac_rssi config action: add, del, list
 * @bssid: Neighbour bssid
 * @client_macaddr: Non-Associated client MAC
 * @chan_num: channel number to scan
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS cdp_vdev_config_for_nac_rssi(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, enum cdp_nac_param_cmd nac_cmd,
	char *bssid, char *client_macaddr, uint8_t chan_num)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}

	if (!soc->ops->ctrl_ops ||
	    !soc->ops->ctrl_ops->txrx_vdev_config_for_nac_rssi)
		return QDF_STATUS_E_FAILURE;

	return soc->ops->ctrl_ops->txrx_vdev_config_for_nac_rssi(vdev,
			nac_cmd, bssid, client_macaddr, chan_num);
}
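
/*
 * Example (illustrative only): a minimal sketch of requesting RSSI
 * measurement for a non-associated client. soc, vdev, bssid, client_mac
 * and chan_num are placeholders, and CDP_NAC_PARAM_ADD is the assumed
 * enumerator for the "add" action in enum cdp_nac_param_cmd.
 *
 *	if (cdp_vdev_config_for_nac_rssi(soc, vdev, CDP_NAC_PARAM_ADD,
 *					 bssid, client_mac, chan_num) !=
 *	    QDF_STATUS_SUCCESS)
 *		qdf_print("%s: NAC RSSI config failed\n", __func__);
 */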
#endif /* ATH_SUPPORT_NAC_RSSI */
#endif /* _CDP_TXRX_CTRL_H_ */