/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HIF_H_
#define _HIF_H_

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Header files */
#include <qdf_status.h>
#include "qdf_nbuf.h"
#include "qdf_lro.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
#include <linux/pci.h>
#endif /* HIF_PCI */
#ifdef HIF_USB
#include <linux/usb.h>
#endif /* HIF_USB */
#ifdef IPA_OFFLOAD
#include <linux/ipa.h>
#endif

#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1

typedef void __iomem *A_target_id_t;
typedef void *hif_handle_t;

#define HIF_TYPE_AR6002 2
#define HIF_TYPE_AR6003 3
#define HIF_TYPE_AR6004 5
#define HIF_TYPE_AR9888 6
#define HIF_TYPE_AR6320 7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_IPQ4019 13
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16

#ifdef IPA_OFFLOAD
#define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE 37
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32
#endif
/**
 * enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
        host2wbm_desc_feed = 18,
        host2reo_re_injection,
        host2reo_command,
        host2rxdma_monitor_ring3,
        host2rxdma_monitor_ring2,
        host2rxdma_monitor_ring1,
        reo2host_exception,
        wbm2host_rx_release,
        reo2host_status,
        reo2host_destination_ring4,
        reo2host_destination_ring3,
        reo2host_destination_ring2,
        reo2host_destination_ring1,
        rxdma2host_monitor_destination_mac3,
        rxdma2host_monitor_destination_mac2,
        rxdma2host_monitor_destination_mac1,
        ppdu_end_interrupts_mac3,
        ppdu_end_interrupts_mac2,
        ppdu_end_interrupts_mac1,
        rxdma2host_monitor_status_ring_mac3,
        rxdma2host_monitor_status_ring_mac2,
        rxdma2host_monitor_status_ring_mac1,
        host2rxdma_host_buf_ring_mac3,
        host2rxdma_host_buf_ring_mac2,
        host2rxdma_host_buf_ring_mac1,
        rxdma2host_destination_ring_mac3,
        rxdma2host_destination_ring_mac2,
        rxdma2host_destination_ring_mac1,
        host2tcl_input_ring4,
        host2tcl_input_ring3,
        host2tcl_input_ring2,
        host2tcl_input_ring1,
        wbm2host_tx_completions_ring3,
        wbm2host_tx_completions_ring2,
        wbm2host_tx_completions_ring1,
        tcl2host_status_ring,
};
struct CE_state;

#define CE_COUNT_MAX 12
#define HIF_MAX_GRP_IRQ 16
#define HIF_MAX_GROUP 8

#ifdef CONFIG_SLUB_DEBUG_ON
#ifndef CONFIG_WIN
#define HIF_CONFIG_SLUB_DEBUG_ON
#endif
#endif

#ifndef NAPI_YIELD_BUDGET_BASED
#ifdef HIF_CONFIG_SLUB_DEBUG_ON
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else /* PERF build */
#ifdef CONFIG_WIN
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
#endif /* CONFIG_WIN */
#endif /* SLUB_DEBUG_ON */
#else /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */

#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE \
        (1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)

/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is re-enabled
 * @napi_workdone: cumulative of all work done reported by the handler
 * @cpu_corrected: incremented when the execution context runs on a different
 *                 core than the one its irq is affined to
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: maximum poll time observed for this context
 *
 * needs to be renamed
 */
struct qca_napi_stat {
        uint32_t napi_schedules;
        uint32_t napi_polls;
        uint32_t napi_completes;
        uint32_t napi_workdone;
        uint32_t cpu_corrected;
        uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
        uint32_t time_limit_reached;
        uint32_t rxpkt_thresh_reached;
        unsigned long long napi_max_poll_time;
};
/**
 * struct qca_napi_info - per NAPI instance data structure
 *
 * This data structure holds the per-instance state of a NAPI context.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
        struct net_device netdev; /* dummy net_dev */
        void *hif_ctx;
        struct napi_struct napi;
        uint8_t scale; /* currently same on all instances */
        uint8_t id;
        uint8_t cpu;
        int irq;
        struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
        /* will only be present for data rx CE's */
        void (*offld_flush_cb)(void *);
        struct napi_struct rx_thread_napi;
        struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
        qdf_lro_ctx_t lro_ctx;
};
enum qca_napi_tput_state {
        QCA_NAPI_TPUT_UNINITIALIZED,
        QCA_NAPI_TPUT_LO,
        QCA_NAPI_TPUT_HI
};

enum qca_napi_cpu_state {
        QCA_NAPI_CPU_UNINITIALIZED,
        QCA_NAPI_CPU_DOWN,
        QCA_NAPI_CPU_UP
};
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: up/down/uninitialized state of this cpu entry
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all cores of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at,
 *            same for all cpus of the same core
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by the hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
        enum qca_napi_cpu_state state;
        int core_id;
        int cluster_id;
        cpumask_t core_mask;
        cpumask_t thread_mask;
        unsigned int max_freq;
        uint32_t napis;
        uint32_t execs;
        int cluster_nxt; /* index, not pointer */
};
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which CEs have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head:
 * @bigcl_head:
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 */
struct qca_napi_data {
        struct hif_softc *hif_softc;
        qdf_spinlock_t lock;
        uint32_t state;
        /* bitmap of created/registered NAPI instances, indexed by pipe_id,
         * not used by clients (clients use an id returned by create)
         */
        uint32_t ce_map;
        uint32_t exec_map;
        uint32_t user_cpu_affin_mask;
        struct qca_napi_info *napis[CE_COUNT_MAX];
        struct qca_napi_cpu napi_cpu[NR_CPUS];
        int lilcl_head, bigcl_head;
        enum qca_napi_tput_state napi_mode;
        struct qdf_cpuhp_handler *cpuhp_handler;
        uint8_t flags;
};
/**
 * struct hif_config_info - place holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 *
 * Structure for holding hif ini parameters.
 */
struct hif_config_info {
        bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
        bool enable_runtime_pm;
        u_int32_t runtime_pm_delay;
#endif
};
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 *
 * Structure to hold target information.
 */
struct hif_target_info {
        uint32_t target_version;
        uint32_t target_type;
        uint32_t target_revision;
        uint32_t soc_version;
        char *hw_name;
};

struct hif_opaque_softc {
};

/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR
 *                        platform-specific measures to completely power-off
 *                        the module and associated hardware (i.e. cut power
 *                        supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
        HIF_DEVICE_POWER_UP,
        HIF_DEVICE_POWER_DOWN,
        HIF_DEVICE_POWER_CUT
};
/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 */
enum hif_enable_type {
        HIF_ENABLE_TYPE_PROBE,
        HIF_ENABLE_TYPE_REINIT,
        HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 */
enum hif_disable_type {
        HIF_DISABLE_TYPE_PROBE_ERROR,
        HIF_DISABLE_TYPE_REINIT_ERROR,
        HIF_DISABLE_TYPE_REMOVE,
        HIF_DISABLE_TYPE_SHUTDOWN,
        HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get FIFO address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
        HIF_DEVICE_POWER_STATE = 0,
        HIF_DEVICE_GET_BLOCK_SIZE,
        HIF_DEVICE_GET_FIFO_ADDR,
        HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
        HIF_DEVICE_GET_IRQ_PROC_MODE,
        HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
        HIF_DEVICE_POWER_STATE_CHANGE,
        HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
        HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
        HIF_DEVICE_GET_OS_DEVICE,
        HIF_DEVICE_DEBUG_BUS_STATE,
        HIF_BMI_DONE,
        HIF_DEVICE_SET_TARGET_TYPE,
        HIF_DEVICE_SET_HTC_CONTEXT,
        HIF_DEVICE_GET_HTC_CONTEXT,
};
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
struct HID_ACCESS_LOG {
        uint32_t seqnum;
        bool is_write;
        void *addr;
        uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
                   uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the dsrHandler
 *           note: rwCompletionHandler is provided the context
 *           passed to hif_read_write
 * @rwCompletionHandler: Read / write completion handler
 * @dsrHandler: DSR Handler
 */
struct htc_callbacks {
        void *context;
        QDF_STATUS (*rwCompletionHandler)(void *rwContext, QDF_STATUS status);
        QDF_STATUS (*dsrHandler)(void *context);
};

/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To set driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state is Load/Unload in progress
 * @is_driver_unloading: Query if driver is unloading
 * @is_target_ready: Query if the target is ready
 *
 * This structure provides callback pointers for HIF to query HDD for driver
 * states.
 */
struct hif_driver_state_callbacks {
        void *context;
        void (*set_recovery_in_progress)(void *context, uint8_t val);
        bool (*is_recovery_in_progress)(void *context);
        bool (*is_load_unload_in_progress)(void *context);
        bool (*is_driver_unloading)(void *context);
        bool (*is_target_ready)(void *context);
};
/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
                                   * handled atomically by
                                   * DiagRead/DiagWrite
                                   */

/*
 * API to handle HIF-specific BMI message exchanges. This API is synchronous
 * and only allowed to be called from a context that can block (sleep).
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
                                qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
                                uint8_t *pSendMessage, uint32_t Length,
                                uint8_t *pResponseMessage,
                                uint32_t *pResponseLength, uint32_t TimeoutMS);

void hif_register_bmi_callbacks(struct hif_softc *hif_sc);
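
/*
 * Example (illustrative sketch only; the DMA addresses and the command
 * buffer are assumed to be set up by the caller's BMI layer, not by this
 * header):
 *
 *      uint8_t rsp_buf[256];
 *      uint32_t rsp_len = sizeof(rsp_buf);
 *      QDF_STATUS status;
 *
 *      status = hif_exchange_bmi_msg(hif_ctx, cmd_dma_addr, rsp_dma_addr,
 *                                    bmi_cmd_buf, bmi_cmd_len,
 *                                    rsp_buf, &rsp_len,
 *                                    HIF_BMI_EXCHANGE_NO_TIMEOUT);
 *      if (status != QDF_STATUS_SUCCESS)
 *              return status;
 */
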
/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
                                uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx,
                             uint32_t address, uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
                            void *ramdump_base, uint32_t address,
                            uint32_t size);

/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
                                 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
                              uint32_t address, uint8_t *data, int nbytes);
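
/*
 * Example (illustrative sketch; target_reg_addr is a placeholder, not a
 * real register offset): read a 4-byte Target word over the Diag window,
 * set a bit, and write it back.
 *
 *      uint32_t val;
 *
 *      if (hif_diag_read_access(hif_ctx, target_reg_addr, &val) ==
 *          QDF_STATUS_SUCCESS)
 *              hif_diag_write_access(hif_ctx, target_reg_addr, val | 0x1);
 */
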
typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
                                fastpath_msg_handler handler, void *context);
#else
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
                                              fastpath_msg_handler handler,
                                              void *context)
{
        return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
        return NULL;
}
#endif

/*
 * Enable/disable CDC max performance workaround
 * For max performance set this to 0
 * To allow SoC to enter sleep set this to 1
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0

void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
                             qdf_shared_mem_t **ce_sr,
                             uint32_t *ce_sr_ring_size,
                             qdf_dma_addr_t *ce_reg_paddr);
/**
 * @brief List of callbacks - filled in by HTC.
 */
struct hif_msg_callbacks {
        void *Context; /**< context meaningful to HTC */
        QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
                                          uint32_t transferID,
                                          uint32_t toeplitz_hash_result);
        QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
                                          uint8_t pipeID);
        void (*txResourceAvailHandler)(void *context, uint8_t pipe);
        void (*fwEventHandler)(void *context, QDF_STATUS status);
};
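
/*
 * Example (illustrative sketch; htc_tx_done, htc_rx_done,
 * htc_tx_resource_avail and htc_fw_event are hypothetical HTC-side
 * handlers): HTC fills in this table and hands it to HIF through
 * hif_post_init(), declared later in this header.
 *
 *      struct hif_msg_callbacks cbs = {
 *              .Context = htc_handle,
 *              .txCompletionHandler = htc_tx_done,
 *              .rxCompletionHandler = htc_rx_done,
 *              .txResourceAvailHandler = htc_tx_resource_avail,
 *              .fwEventHandler = htc_fw_event,
 *      };
 *
 *      hif_post_init(hif_ctx, htc_handle, &cbs);
 */
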
enum hif_target_status {
        TARGET_STATUS_CONNECTED = 0, /* target connected */
        TARGET_STATUS_RESET, /* target got reset */
        TARGET_STATUS_EJECT, /* target got ejected */
        TARGET_STATUS_SUSPEND /* target got suspended */
};

/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
        HIF_LOWDESC_CE_CFG = 1,
        HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
        (attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
        (attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
        (attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
        (attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
        (attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
        (attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
        (attr |= (v & 0x01) << 30)
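
/*
 * Example (illustrative sketch; pipe_id, transfer_id and nbuf are assumed
 * to be provided by the caller): compose a data_attr word with the setter
 * macros above and pass it to hif_send_head().
 *
 *      uint32_t data_attr = 0;
 *
 *      HIF_DATA_ATTR_SET_TX_CLASSIFY(data_attr, 1);
 *      HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(data_attr, 0);
 *      HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(data_attr, 1);
 *      hif_send_head(hif_ctx, pipe_id, transfer_id, qdf_nbuf_len(nbuf),
 *                    nbuf, data_attr);
 */
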
struct hif_ul_pipe_info {
        unsigned int nentries;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index; /* cached copy */
        unsigned int hw_index; /* cached copy */
        void *base_addr_owner_space; /* Host address space */
        qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_dl_pipe_info {
        unsigned int nentries;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index; /* cached copy */
        unsigned int hw_index; /* cached copy */
        void *base_addr_owner_space; /* Host address space */
        qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_pipe_addl_info {
        uint32_t pci_mem;
        uint32_t ctrl_addr;
        struct hif_ul_pipe_info ul_pipe;
        struct hif_dl_pipe_info dl_pipe;
};

#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */
struct hif_bus_id;

void hif_claim_device(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
                               int opcode, void *config, uint32_t config_len);
void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
                   struct hif_msg_callbacks *callbacks);
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
void hif_stop(struct hif_opaque_softc *hif_ctx);
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
                      uint8_t cmd_id, bool start);
QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
                         uint32_t transferID, uint32_t nbytes,
                         qdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
                             int force);
void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
                          uint8_t *DLPipe);
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
                            uint8_t *ul_pipe, uint8_t *dl_pipe,
                            int *ul_is_polled, int *dl_is_polled);
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
                          bool wait_for_it);
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);

#ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
        return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif

void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
                     u32 *revision, const char **target_name);
#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - Register the offld flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: Flush callback; either ol_flush, in case of rx_thread,
 *                       or the GRO/LRO flush when RxThread is not enabled.
 *                       Called with the corresponding context for flush.
 *
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
                                 void (offld_flush_handler)(void *ol_ctx));

/**
 * hif_offld_flush_cb_deregister() - Deregister the offld flush callback
 * @scn: HIF opaque context
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
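
/*
 * Example (illustrative sketch; dp_rx_flush is a hypothetical datapath
 * flush routine with a void (*)(void *) signature): the rx offload path
 * registers its flush callback once at init and removes it on teardown.
 *
 *      hif_offld_flush_cb_register(scn, dp_rx_flush);
 *      ...
 *      hif_offld_flush_cb_deregister(scn);
 */
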
void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
                                      int htc_htt_tx_endpoint);
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
                                  enum qdf_bus_type bus_type,
                                  struct hif_driver_state_callbacks *cbk);
void hif_close(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
                      void *bdev, const struct hif_bus_id *bid,
                      enum qdf_bus_type bus_type,
                      enum hif_enable_type type);
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
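
/*
 * Typical bring-up/teardown ordering (illustrative sketch; dev, bdev and
 * bid come from the bus layer, and QDF_BUS_TYPE_PCI is shown only as an
 * example qdf_bus_type value):
 *
 *      hif_ctx = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk);
 *      hif_enable(hif_ctx, dev, bdev, bid, QDF_BUS_TYPE_PCI,
 *                 HIF_ENABLE_TYPE_PROBE);
 *      ...
 *      hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
 *      hif_close(hif_ctx);
 */
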
void hif_display_stats(struct hif_opaque_softc *hif_ctx);
void hif_clear_stats(struct hif_opaque_softc *hif_ctx);

#ifdef FEATURE_RUNTIME_PM
struct hif_pm_runtime_lock;
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
                             struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
                                   struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
                                 struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
                                           struct hif_pm_runtime_lock *lock,
                                           unsigned int delay);
#else
struct hif_pm_runtime_lock {
        const char *name;
};

static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{}
static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
                                        const char *name)
{ return 0; }
static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
                        struct hif_pm_runtime_lock *lock) {}
static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
                                                 struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
                                               struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
                                       struct hif_pm_runtime_lock *lock,
                                       unsigned int delay)
{ return 0; }
#endif
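
/*
 * Example (illustrative sketch; "lock" is assumed to have been set up via
 * hif_runtime_lock_init()): bracket a burst of bus activity so the bus
 * cannot runtime-suspend in the middle of it.
 *
 *      hif_pm_runtime_prevent_suspend(hif_ctx, lock);
 *      ... issue the transaction ...
 *      hif_pm_runtime_allow_suspend(hif_ctx, lock);
 */
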
void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
                                 bool is_packet_log_enabled);
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);

#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API returns the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
        return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API returns the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
        if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
                return true;
        else
                return false;
}
#endif
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
#ifdef FEATURE_RUNTIME_PM
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif

int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
                     u32 *revision, const char **target_name);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx,
                           enum hif_target_status status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
                         struct hif_config_info *cfg);
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
                          uint32_t transfer_id, u_int32_t len,
                          uint32_t sendhead);
int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
                    uint32_t transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
                  uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
                                                  struct hif_pipe_addl_info *hif_info,
                                                  uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
                                 uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
                         int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);

enum hif_exec_type {
        HIF_EXEC_NAPI_TYPE,
        HIF_EXEC_TASKLET_TYPE,
};

typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
                                uint32_t numirq, uint32_t irq[],
                                ext_intr_handler handler,
                                void *cb_ctx, const char *context_name,
                                enum hif_exec_type type, uint32_t scale);
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
                               const char *context_name);
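
/*
 * Example (illustrative sketch; dp_ext_handler and dp_ctx are hypothetical
 * datapath entities, and the irq list below is only an example built from
 * enum hif_ic_irq earlier in this header): register a group of datapath
 * interrupts to be serviced as one execution context, then tear it down
 * by name.
 *
 *      uint32_t irqs[] = { reo2host_destination_ring1,
 *                          wbm2host_tx_completions_ring1 };
 *
 *      hif_register_ext_group(hif_ctx, 2, irqs, dp_ext_handler, dp_ctx,
 *                             "dp_intr_grp", HIF_EXEC_NAPI_TYPE,
 *                             QCA_NAPI_DEF_SCALE);
 *      ...
 *      hif_deregister_exec_group(hif_ctx, "dp_intr_grp");
 */
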
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
                              u_int8_t pipeid,
                              struct hif_msg_callbacks *callbacks);
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

#ifdef __cplusplus
}
#endif

void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
                               void (*callback)(void *),
                               void *priv);

#ifndef CONFIG_WIN
#ifndef HIF_CE_DEBUG_DATA_BUF
#define HIF_CE_DEBUG_DATA_BUF 0
#endif
#endif
/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * here as well
 */
#if HIF_CE_DEBUG_DATA_BUF
ssize_t hif_dump_desc_trace_buf(struct device *dev,
                                struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
                                       const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
                            const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* Note: for MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores the CE service max yield time in the hif context based
 * on the ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
                                       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns the CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores the CE service max rx ind flush in the hif context based
 * on the ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
                                         uint8_t ce_service_max_rx_ind_flush);
#ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handler
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and the buffers pointed to by them into the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
                         uint8_t *buf_init, uint32_t buf_sz,
                         uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */

#endif /* _HIF_H_ */