/*
 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file cdp_txrx_cmn.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */

#ifndef _CDP_TXRX_CMN_H_
#define _CDP_TXRX_CMN_H_

#include "qdf_types.h"
#include "qdf_nbuf.h"
#include "cdp_txrx_ops.h"
#include "cdp_txrx_handle.h"
#include "cdp_txrx_cmn_struct.h"

/******************************************************************************
 *
 * Common Data Path Header File
 *
 *****************************************************************************/

static inline int
cdp_soc_attach_target(ol_txrx_soc_handle soc)
{
        if (soc->ops->cmn_drv_ops->txrx_soc_attach_target)
                return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
        return 0;
}
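
/*
 * Every cdp_* wrapper in this file follows the same dispatch pattern: check
 * whether the converged driver registered the corresponding callback in
 * soc->ops->cmn_drv_ops, call through when it did, and otherwise fall back to
 * a benign default (0 or NULL).  The snippet below is an illustrative sketch
 * only, never compiled; the soc handle is assumed to have been obtained from
 * the converged driver's soc attach path, which is outside this header.
 */
#if 0   /* illustrative snippet from a hypothetical caller */
        int ret;

        /* Complete any target-side data path initialisation. */
        ret = cdp_soc_attach_target(soc);
        if (ret)
                qdf_print("cdp_soc_attach_target failed: %d", ret);
#endif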

static inline int
cdp_soc_get_nss_cfg(ol_txrx_soc_handle soc)
{
        if (soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg)
                return soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg(soc);
        return 0;
}

static inline void
cdp_soc_set_nss_cfg(ol_txrx_soc_handle soc, uint32_t config)
{
        if (soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg)
                soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg(soc, config);
}

static inline struct cdp_vdev *
cdp_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
        uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
        if (soc->ops->cmn_drv_ops->txrx_vdev_attach)
                return soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev,
                        vdev_mac_addr, vdev_id, op_mode);
        return NULL;
}

static inline void
cdp_vdev_detach(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        ol_txrx_vdev_delete_cb callback, void *cb_context)
{
        if (soc->ops->cmn_drv_ops->txrx_vdev_detach)
                return soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev,
                        callback, cb_context);
        return;
}

static inline int
cdp_pdev_attach_target(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
{
        if (soc->ops->cmn_drv_ops->txrx_pdev_attach_target)
                return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(pdev);
        return 0;
}

static inline struct cdp_pdev *cdp_pdev_attach
        (ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
        HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
        if (soc->ops->cmn_drv_ops->txrx_pdev_attach)
                return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev,
                        htc_pdev, osdev, pdev_id);
        return NULL;
}

static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev)
{
        if (soc->ops->cmn_drv_ops->txrx_pdev_post_attach)
                return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev);
        return 0;
}

static inline void
cdp_pdev_pre_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
        if (soc->ops->cmn_drv_ops->txrx_pdev_pre_detach)
                return soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(pdev, force);
        return;
}

static inline void
cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
        if (soc->ops->cmn_drv_ops->txrx_pdev_detach)
                return soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force);
        return;
}

static inline void *cdp_peer_create
        (ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        uint8_t *peer_mac_addr)
{
        if (soc->ops->cmn_drv_ops->txrx_peer_create)
                return soc->ops->cmn_drv_ops->txrx_peer_create(vdev,
                        peer_mac_addr);
        return NULL;
}

static inline void cdp_peer_setup
        (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer)
{
        if (soc->ops->cmn_drv_ops->txrx_peer_setup)
                return soc->ops->cmn_drv_ops->txrx_peer_setup(vdev,
                        peer);
        return;
}

static inline void cdp_peer_teardown
        (ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer)
{
        if (soc->ops->cmn_drv_ops->txrx_peer_teardown)
                return soc->ops->cmn_drv_ops->txrx_peer_teardown(vdev,
                        peer);
        return;
}

static inline void
cdp_peer_delete(ol_txrx_soc_handle soc, void *peer)
{
        if (soc->ops->cmn_drv_ops->txrx_peer_delete)
                return soc->ops->cmn_drv_ops->txrx_peer_delete(peer);
        return;
}
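
/*
 * A rough, illustrative ordering of the attach/detach wrappers above, as a
 * caller might use them.  This is a sketch under assumed identifiers (soc,
 * pdev, vdev_mac, vdev_id, peer_mac), not a sequence mandated by this header,
 * and it is never compiled.
 */
#if 0   /* illustrative snippet from a hypothetical caller */
        struct cdp_vdev *vdev;
        void *peer;

        /* Bring-up: create a vdev on the pdev, then create and set up a peer. */
        vdev = cdp_vdev_attach(soc, pdev, vdev_mac, vdev_id, wlan_op_mode_sta);
        peer = cdp_peer_create(soc, vdev, peer_mac);
        cdp_peer_setup(soc, vdev, peer);

        /* Teardown in roughly the reverse order. */
        cdp_peer_teardown(soc, vdev, peer);
        cdp_peer_delete(soc, peer);
        cdp_vdev_detach(soc, vdev, NULL, NULL);
#endif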

static inline int
cdp_set_monitor_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        uint8_t smart_monitor)
{
        if (soc->ops->cmn_drv_ops->txrx_set_monitor_mode)
                return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(vdev,
                        smart_monitor);
        return 0;
}

static inline void
cdp_set_curchan(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev,
        uint32_t chan_mhz)
{
        if (soc->ops->cmn_drv_ops->txrx_set_curchan)
                return soc->ops->cmn_drv_ops->txrx_set_curchan(pdev, chan_mhz);
        return;
}

static inline void
cdp_set_privacy_filters(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        void *filter, uint32_t num)
{
        if (soc->ops->cmn_drv_ops->txrx_set_privacy_filters)
                return soc->ops->cmn_drv_ops->txrx_set_privacy_filters(vdev,
                        filter, num);
        return;
}

/******************************************************************************
 * Data Interface (B Interface)
 *****************************************************************************/

static inline void
cdp_vdev_register(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        void *osif_vdev, struct ol_txrx_ops *txrx_ops)
{
        if (soc->ops->cmn_drv_ops->txrx_vdev_register)
                return soc->ops->cmn_drv_ops->txrx_vdev_register(vdev,
                        osif_vdev, txrx_ops);
        return;
}

static inline int
cdp_mgmt_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        qdf_nbuf_t tx_mgmt_frm, uint8_t type)
{
        if (soc->ops->cmn_drv_ops->txrx_mgmt_send)
                return soc->ops->cmn_drv_ops->txrx_mgmt_send(vdev,
                        tx_mgmt_frm, type);
        return 0;
}

static inline int
cdp_mgmt_send_ext(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        qdf_nbuf_t tx_mgmt_frm, uint8_t type,
        uint8_t use_6mbps, uint16_t chanfreq)
{
        if (soc->ops->cmn_drv_ops->txrx_mgmt_send_ext)
                return soc->ops->cmn_drv_ops->txrx_mgmt_send_ext
                        (vdev, tx_mgmt_frm, type, use_6mbps, chanfreq);
        return 0;
}

static inline void
cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
        uint8_t type,
        ol_txrx_mgmt_tx_cb download_cb,
        ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
        if (soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set)
                return soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set
                        (pdev, type, download_cb, ota_ack_cb, ctxt);
        return;
}

static inline int cdp_get_tx_pending(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev)
{
        if (soc->ops->cmn_drv_ops->txrx_get_tx_pending)
                return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev);
        return 0;
}

static inline void
cdp_data_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_vdev *data_vdev,
        ol_txrx_data_tx_cb callback, void *ctxt)
{
        if (soc->ops->cmn_drv_ops->txrx_data_tx_cb_set)
                return soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(data_vdev,
                        callback, ctxt);
        return;
}

/******************************************************************************
 * Statistics and Debugging Interface (C Interface)
 *****************************************************************************/

/**
 * External Device physical address types
 *
 * Currently, both MAC and IPA uController use the same size addresses
 * and descriptors are exchanged between these two depending on the mode.
 *
 * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA
 * operations. However, external device physical address sizes
 * may be different from host-specific physical address sizes.
 * This calls for the following definitions for target devices
 * (MAC, IPA uc).
 */
#if HTT_PADDR64
typedef uint64_t target_paddr_t;
#else
typedef uint32_t target_paddr_t;
#endif /* HTT_PADDR64 */
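
/*
 * Illustrative only: when HTT_PADDR64 is not set, a (possibly 64-bit) host
 * qdf_dma_addr_t is narrowed to a 32-bit target_paddr_t before it is written
 * into a descriptor shared with the target.  The variable names and the
 * qdf_nbuf accessor below are assumptions for the sketch, which is never
 * compiled.
 */
#if 0   /* illustrative snippet from a hypothetical caller */
        qdf_dma_addr_t host_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
        target_paddr_t tgt_paddr = (target_paddr_t)host_paddr;
#endif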

static inline int
cdp_aggr_cfg(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        int max_subfrms_ampdu,
        int max_subfrms_amsdu)
{
        if (soc->ops->cmn_drv_ops->txrx_aggr_cfg)
                return soc->ops->cmn_drv_ops->txrx_aggr_cfg(vdev,
                        max_subfrms_ampdu, max_subfrms_amsdu);
        return 0;
}

static inline int
cdp_fw_stats_get(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        struct ol_txrx_stats_req *req, bool per_vdev,
        bool response_expected)
{
        if (soc->ops->cmn_drv_ops->txrx_fw_stats_get)
                return soc->ops->cmn_drv_ops->txrx_fw_stats_get(vdev, req,
                        per_vdev, response_expected);
        return 0;
}

static inline int
cdp_debug(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, int debug_specs)
{
        if (soc->ops->cmn_drv_ops->txrx_debug)
                return soc->ops->cmn_drv_ops->txrx_debug(vdev, debug_specs);
        return 0;
}

static inline void cdp_fw_stats_cfg(ol_txrx_soc_handle soc,
        struct cdp_vdev *vdev, uint8_t cfg_stats_type, uint32_t cfg_val)
{
        if (soc->ops->cmn_drv_ops->txrx_fw_stats_cfg)
                return soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(vdev,
                        cfg_stats_type, cfg_val);
        return;
}

static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level)
{
        if (soc->ops->cmn_drv_ops->txrx_print_level_set)
                return soc->ops->cmn_drv_ops->txrx_print_level_set(level);
        return;
}

static inline uint8_t *
cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
        if (soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr)
                return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(vdev);
        return NULL;
}

/**
 * cdp_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
 * vdev
 * @vdev: vdev handle
 *
 * Return: Handle to struct qdf_mac_addr
 */
static inline struct qdf_mac_addr *cdp_get_vdev_struct_mac_addr
        (ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
        if (soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr)
                return soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr
                        (vdev);
        return NULL;
}

/**
 * cdp_get_pdev_from_vdev() - Return handle to pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to pdev
 */
static inline struct cdp_pdev *cdp_get_pdev_from_vdev
        (ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
        if (soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev)
                return soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev(vdev);
        return NULL;
}

/**
 * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to control pdev
 */
static inline struct cdp_cfg *
cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
        if (soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev)
                return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev
                        (vdev);
        return NULL;
}

static inline struct cdp_vdev *
cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
        uint8_t vdev_id)
{
        if (soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id)
                return soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id
                        (pdev, vdev_id);
        return NULL;
}

static inline void
cdp_soc_detach(ol_txrx_soc_handle soc)
{
        if (soc->ops->cmn_drv_ops->txrx_soc_detach)
                return soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc);
        return;
}

static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc,
        void *peer_handle, uint8_t dialogtoken, uint16_t tid,
        uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum)
{
        if (soc->ops->cmn_drv_ops->addba_requestprocess)
                return soc->ops->cmn_drv_ops->addba_requestprocess(peer_handle,
                        dialogtoken, tid, batimeout, buffersize, startseqnum);
        return 0;
}

static inline void cdp_addba_responsesetup(ol_txrx_soc_handle soc,
        void *peer_handle, uint8_t tid, uint8_t *dialogtoken,
        uint16_t *statuscode, uint16_t *buffersize, uint16_t *batimeout)
{
        if (soc->ops->cmn_drv_ops->addba_responsesetup)
                soc->ops->cmn_drv_ops->addba_responsesetup(peer_handle, tid,
                        dialogtoken, statuscode, buffersize, batimeout);
}

static inline int cdp_delba_process(ol_txrx_soc_handle soc,
        void *peer_handle, int tid, uint16_t reasoncode)
{
        if (soc->ops->cmn_drv_ops->delba_process)
                return soc->ops->cmn_drv_ops->delba_process(peer_handle,
                        tid, reasoncode);
        return 0;
}

/**
 * cdp_get_peer_mac_addr_frm_id() - return the vdev id and peer mac address
 * for a given peer id
 * @soc: SOC handle
 * @peer_id: peer id of the peer for which the mac address is required
 * @mac_addr: reference to the mac address
 *
 * Return: vdev_id of the vap
 */
static inline uint8_t
cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id,
        uint8_t *mac_addr)
{
        if (soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id)
                return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc,
                        peer_id, mac_addr);
        return CDP_INVALID_VDEV_ID;
}

/**
 * cdp_set_vdev_dscp_tid_map() - set the DSCP-to-TID map used by the vap
 * @vdev: vdev handle
 * @map_id: id of the tid map
 *
 * Return: void
 */
static inline void cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc,
        struct cdp_vdev *vdev, uint8_t map_id)
{
        if (soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map)
                return soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(vdev,
                        map_id);
        return;
}

/**
 * cdp_set_pdev_dscp_tid_map() - change tid values in a DSCP-to-TID map
 * @pdev: pdev handle
 * @map_id: id of the tid map
 * @tos: index value in the map that needs to be changed
 * @tid: tid value passed by the user
 *
 * Return: void
 */
static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc,
        struct cdp_pdev *pdev, uint8_t map_id, uint8_t tos, uint8_t tid)
{
        if (soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map) {
                return soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(pdev,
                        map_id, tos, tid);
        }
        return;
}
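
/*
 * Illustrative only: override a single DSCP-to-TID entry in map 0 so that,
 * for example, DSCP 46 (voice) is steered to TID 6, then bind the vap to
 * that map.  The map id and values are example numbers, not defaults
 * mandated by this header, and the snippet is never compiled.
 */
#if 0   /* illustrative snippet from a hypothetical caller */
        cdp_set_pdev_dscp_tid_map(soc, pdev, 0 /* map_id */,
                        46 /* DSCP/tos index */, 6 /* tid */);
        cdp_set_vdev_dscp_tid_map(soc, vdev, 0 /* map_id */);
#endif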

/**
 * cdp_flush_cache_rx_queue() - flush the cached rx queue frames
 *
 * Return: None
 */
static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc)
{
        if (soc->ops->cmn_drv_ops->flush_cache_rx_queue)
                soc->ops->cmn_drv_ops->flush_cache_rx_queue();
}

/**
 * cdp_txrx_stats() - map a stats request to host and firmware statistics
 * @soc: soc handle
 * @vdev: virtual device
 * @stats: statistics option
 *
 * Return: status
 */
static inline
int cdp_txrx_stats(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
        enum cdp_stats stats)
{
        if (soc->ops->cmn_drv_ops->txrx_stats)
                return soc->ops->cmn_drv_ops->txrx_stats(vdev, stats);
        return 0;
}

/**
 * cdp_txrx_intr_attach() - attach and configure the data path interrupts
 * @soc: soc handle
 */
static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc)
{
        if (soc->ops->cmn_drv_ops->txrx_intr_attach)
                return soc->ops->cmn_drv_ops->txrx_intr_attach(soc);
        return 0;
}

/**
 * cdp_txrx_intr_detach() - detach the data path interrupts
 * @soc: soc handle
 */
static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc)
{
        if (soc->ops->cmn_drv_ops->txrx_intr_detach)
                soc->ops->cmn_drv_ops->txrx_intr_detach(soc);
}

/**
 * cdp_display_stats() - dump the requested statistics
 * @soc: soc handle
 * @value: statistics option
 */
static inline QDF_STATUS
cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value)
{
        if (soc->ops->cmn_drv_ops->display_stats)
                return soc->ops->cmn_drv_ops->display_stats(soc, value);
        return 0;
}

/**
 * cdp_set_pn_check() - set the PN check parameters for a peer
 * @soc: soc handle
 * @vdev: vdev handle
 * @peer_handle: peer handle
 * @sec_type: security type
 * @rx_pn: receive PN
 */
static inline int cdp_set_pn_check(ol_txrx_soc_handle soc,
        struct cdp_vdev *vdev, struct cdp_peer *peer_handle,
        enum cdp_sec_type sec_type, uint32_t *rx_pn)
{
        if (soc->ops->cmn_drv_ops->set_pn_check)
                soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle,
                        sec_type, rx_pn);
        return 0;
}

#endif /* _CDP_TXRX_CMN_H_ */