/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef _HIF_H_
#define _HIF_H_

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Header files */
#include <qdf_status.h>
#include "qdf_nbuf.h"
#include "qdf_lro.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
#include <linux/pci.h>
#endif /* HIF_PCI */
#ifdef HIF_USB
#include <linux/usb.h>
#endif /* HIF_USB */
#ifdef IPA_OFFLOAD
#include <linux/ipa.h>
#endif

#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1

typedef void __iomem *A_target_id_t;
typedef void *hif_handle_t;

#define HIF_TYPE_AR6002   2
#define HIF_TYPE_AR6003   3
#define HIF_TYPE_AR6004   5
#define HIF_TYPE_AR9888   6
#define HIF_TYPE_AR6320   7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B   11
#define HIF_TYPE_QCA9984  12
#define HIF_TYPE_IPQ4019  13
#define HIF_TYPE_QCA9888  14
#define HIF_TYPE_QCA8074  15
#define HIF_TYPE_QCA6290  16

#ifdef IPA_OFFLOAD
#define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE 37
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3     32
#endif

/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 18,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};

struct CE_state;

#define CE_COUNT_MAX 12
#define HIF_MAX_GRP_IRQ 16
#define HIF_MAX_GROUP 8

#ifdef CONFIG_SLUB_DEBUG_ON
#ifndef CONFIG_WIN
#define HIF_CONFIG_SLUB_DEBUG_ON
#endif
#endif

#ifndef NAPI_YIELD_BUDGET_BASED
#ifdef HIF_CONFIG_SLUB_DEBUG_ON
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else /* PERF build */
#ifdef CONFIG_WIN
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
#endif /* CONFIG_WIN */
#endif /* SLUB_DEBUG_ON */
#else /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */

#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE \
	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)

/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4

/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is re-enabled
 * @napi_workdone: cumulative of all work done reported by the handler
 * @cpu_corrected: incremented when the execution context runs on a different
 *	core than the one that its irq is affined to
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: longest single poll observed for this context
 *
 * needs to be renamed
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
};

/**
 * struct qca_napi_info - per NAPI instance data structure
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device    netdev; /* dummy net_dev */
	void                 *hif_ctx;
	struct napi_struct   napi;
	uint8_t              scale;  /* currently same on all instances */
	uint8_t              id;
	uint8_t              cpu;
	int                  irq;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct   rx_thread_napi;
	struct net_device    rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t        lro_ctx;
};
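
/*
 * Example (illustrative sketch): the stats above are kept per core, so a
 * consumer that wants a context-wide view typically folds the per-CPU
 * entries together. The helper below is hypothetical and relies only on
 * the fields declared in this header.
 *
 *	static uint32_t napi_info_total_workdone(struct qca_napi_info *info)
 *	{
 *		uint32_t total = 0;
 *		int cpu;
 *
 *		for (cpu = 0; cpu < NR_CPUS; cpu++)
 *			total += info->stats[cpu].napi_workdone;
 *
 *		return total;
 *	}
 */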

enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};

/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state:       up/down state of the core
 * @core_id:     physical core id of the core
 * @cluster_id:  cluster this core belongs to
 * @core_mask:   mask to match all cores of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq:    maximum clock this core can be clocked at,
 *               same for all cpus of the same core
 * @napis:       bitmap of napi instances on this core
 * @execs:       bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster (an index
 *               into this table, not a pointer)
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by the hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int                     core_id;
	int                     cluster_id;
	cpumask_t               core_mask;
	cpumask_t               thread_mask;
	unsigned int            max_freq;
	uint32_t                napis;
	uint32_t                execs;
	int                     cluster_nxt; /* index, not pointer */
};
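
/*
 * Example (illustrative sketch): because @cluster_nxt is an index into the
 * cpu table rather than a pointer, walking all cores of one cluster looks
 * like the hypothetical helper below; cl_head would be lilcl_head or
 * bigcl_head from struct qca_napi_data (declared next), and a negative
 * index is assumed to terminate the chain.
 *
 *	static uint32_t cluster_napi_bitmap(struct qca_napi_cpu *cpus,
 *					    int cl_head)
 *	{
 *		uint32_t napis = 0;
 *		int i;
 *
 *		for (i = cl_head; i >= 0 && i < NR_CPUS;
 *		     i = cpus[i].cluster_nxt)
 *			napis |= cpus[i].napis;
 *
 *		return napis;
 *	}
 */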

/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bitmap indicating which ce's have napis running
 * @exec_map: bitmap of instantiated exec contexts
 * @napis: per-CE napi instance pointers
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: index of the first little-cluster core in @napi_cpu
 * @bigcl_head: index of the first big-cluster core in @napi_cpu
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: additional napi state flags
 */
struct qca_napi_data {
	struct hif_softc        *hif_softc;
	qdf_spinlock_t          lock;
	uint32_t                state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t                ce_map;
	uint32_t                exec_map;
	struct qca_napi_info    *napis[CE_COUNT_MAX];
	struct qca_napi_cpu     napi_cpu[NR_CPUS];
	int                     lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t                 flags;
};

/**
 * struct hif_config_info - place holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	bool enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
};

/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: hardware name string
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};

struct hif_opaque_softc {
};

/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP:   HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT:  HIF layer should initiate bus-specific AND/OR
 *                         platform-specific measures to completely power-off
 *                         the module and associated hardware (i.e. cut power
 *                         supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};

/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_MBOX_BLOCK_SIZE: get mbox block size
 * @HIF_DEVICE_GET_MBOX_ADDR: get mbox block address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
	HIF_DEVICE_GET_MBOX_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		   uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1

/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the DSR handler.
 *	     Note: rwCompletionHandler is provided the context
 *	     passed to hif_read_write
 * @rwCompletionHandler: Read / write completion handler
 * @dsrHandler: DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS (*rwCompletionHandler)(void *rwContext, QDF_STATUS status);
	QDF_STATUS (*dsrHandler)(void *context);
};

/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state is Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading
 * @is_target_ready: Query if the target is ready
 *
 * This structure provides callback pointers for HIF to query HDD for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};

/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
				   * handled atomically by
				   * DiagRead/DiagWrite
				   */

/*
 * API to handle HIF-specific BMI message exchanges. This API is synchronous
 * and only allowed to be called from a context that can block (sleep).
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
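
/*
 * Example (illustrative sketch): a blocking BMI request/response exchange.
 * The command/response buffers, their sizes and their DMA addresses
 * (cmd_dma/rsp_dma) are placeholders assumed to have been allocated and
 * mapped by the caller.
 *
 *	QDF_STATUS status;
 *	uint32_t rsp_len = rsp_buf_size;
 *
 *	status = hif_exchange_bmi_msg(hif_ctx, cmd_dma, rsp_dma,
 *				      cmd_buf, cmd_len,
 *				      rsp_buf, &rsp_len,
 *				      HIF_BMI_EXCHANGE_NO_TIMEOUT);
 *	if (status != QDF_STATUS_SUCCESS)
 *		return status;	// rsp_buf/rsp_len are not valid on failure
 */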
void hif_register_bmi_callbacks(struct hif_softc *hif_sc);

/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx,
			     uint32_t address, uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
			    void *ramdump_base, uint32_t address,
			    uint32_t size);

/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			      uint32_t address, uint8_t *data, int nbytes);
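
/*
 * Example (illustrative sketch): read-modify-write of a single 4-byte
 * target word through the diag window. The register address and the bit
 * being set are placeholders; both calls may sleep, so this must run from
 * a context that can block.
 *
 *	uint32_t val;
 *
 *	if (hif_diag_read_access(hif_ctx, reg_addr, &val) !=
 *	    QDF_STATUS_SUCCESS)
 *		return -EIO;
 *	val |= 0x1;
 *	if (hif_diag_write_access(hif_ctx, reg_addr, val) !=
 *	    QDF_STATUS_SUCCESS)
 *		return -EIO;
 */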

typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler, void *context);
#else
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
#endif

/*
 * Enable/disable CDC max performance workaround.
 * For max performance set this to 0.
 * To allow SoC to enter sleep set this to 1.
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0

void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);

/**
 * @brief List of callbacks - filled in by HTC.
 */
struct hif_msg_callbacks {
	void *Context;  /**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint32_t transferID,
					  uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
};

enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0, /* target connected */
	TARGET_STATUS_RESET,         /* target got reset */
	TARGET_STATUS_EJECT,         /* target got ejected */
	TARGET_STATUS_SUSPEND        /* target got suspended */
};

/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};

#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
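
/*
 * Example (illustrative sketch): the HIF_DATA_ATTR_SET_* macros above OR
 * bit-fields into a caller-provided attribute word, which is then passed as
 * the data_attr argument of hif_send_head() (declared further below). The
 * values and the nbuf are placeholders.
 *
 *	uint32_t data_attr = 0;
 *
 *	HIF_DATA_ATTR_SET_TX_CLASSIFY(data_attr, 1);
 *	HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(data_attr, 0);
 *	HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(data_attr, 1);
 *
 *	status = hif_send_head(hif_ctx, pipe_id, transfer_id,
 *			       qdf_nbuf_len(nbuf), nbuf, data_attr);
 */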

struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};

#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */

struct hif_bus_id;

void hif_claim_device(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
			       int opcode, void *config, uint32_t config_len);
void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
		   struct hif_msg_callbacks *callbacks);
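
/*
 * Example (illustrative sketch): HTC (or another transport client) fills
 * struct hif_msg_callbacks and hands it to HIF through hif_post_init()
 * before starting the interface with hif_start() (declared just below).
 * The handler names and htc_handle are placeholders.
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = htc_handle,
 *		.txCompletionHandler = htc_tx_completion_handler,
 *		.rxCompletionHandler = htc_rx_completion_handler,
 *		.txResourceAvailHandler = htc_tx_resource_avail_handler,
 *		.fwEventHandler = htc_fw_event_handler,
 *	};
 *
 *	hif_post_init(hif_ctx, htc_handle, &cbs);
 *	status = hif_start(hif_ctx);
 */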
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
void hif_stop(struct hif_opaque_softc *hif_ctx);
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start);
QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			 uint32_t transferID, uint32_t nbytes,
			 qdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			     int force);
void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
			  uint8_t *DLPipe);
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled);
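
/*
 * Example (illustrative sketch): resolving the upload/download pipe pair
 * for an HTC service before sending on it. The service id comes from the
 * HTC service definitions and, like the nbuf and transfer id, is only a
 * placeholder here.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *	int ret;
 *
 *	ret = hif_map_service_to_pipe(hif_ctx, svc_id,
 *				      &ul_pipe, &dl_pipe,
 *				      &ul_polled, &dl_polled);
 *	if (ret)
 *		return ret;	// service not mapped on this target
 *
 *	status = hif_send_head(hif_ctx, ul_pipe, transfer_id,
 *			       qdf_nbuf_len(nbuf), nbuf, 0);
 */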
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
			  bool wait_for_it);
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);

#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - Register the offld flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: Flush callback; either ol_flush when the rx_thread
 *			 is in use, or the GRO/LRO flush when RxThread is not
 *			 enabled. Called with the corresponding context for
 *			 flush.
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *ol_ctx));

/**
 * hif_offld_flush_cb_deregister() - Deregister the offld flush callback
 * @scn: HIF opaque context
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif

void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint);
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk);
void hif_close(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev, const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type);
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
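
/*
 * Example (illustrative sketch): typical bring-up/tear-down ordering of the
 * calls above. The driver-state callback functions, driver_ctx and the bus
 * specifics (qdf_ctx, mode, dev, bdev, bid) are placeholders supplied by
 * the platform/bus glue; QDF_BUS_TYPE_PCI is used as an example bus type.
 *
 *	struct hif_driver_state_callbacks cbk = {
 *		.context = driver_ctx,
 *		.set_recovery_in_progress = drv_set_recovery_in_progress,
 *		.is_recovery_in_progress = drv_is_recovery_in_progress,
 *		.is_load_unload_in_progress = drv_is_load_unload_in_progress,
 *		.is_driver_unloading = drv_is_driver_unloading,
 *		.is_target_ready = drv_is_target_ready,
 *	};
 *	struct hif_opaque_softc *hif_ctx;
 *
 *	hif_ctx = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk);
 *	if (!hif_ctx)
 *		return -ENOMEM;
 *	if (hif_enable(hif_ctx, dev, bdev, bid, QDF_BUS_TYPE_PCI,
 *		       HIF_ENABLE_TYPE_PROBE) != QDF_STATUS_SUCCESS) {
 *		hif_close(hif_ctx);
 *		return -EIO;
 *	}
 *	// ... normal operation ...
 *	hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
 *	hif_close(hif_ctx);
 */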
void hif_display_stats(struct hif_opaque_softc *hif_ctx);
void hif_clear_stats(struct hif_opaque_softc *hif_ctx);

#ifdef FEATURE_RUNTIME_PM
struct hif_pm_runtime_lock;
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
					   struct hif_pm_runtime_lock *lock,
					   unsigned int delay);
#else
struct hif_pm_runtime_lock {
	const char *name;
};

static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
static inline void
hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx) {}
static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
					const char *name)
{ return 0; }
static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock) {}
static inline int
hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
			       struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
			     struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
				       struct hif_pm_runtime_lock *lock,
				       unsigned int delay)
{ return 0; }
#endif
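
/*
 * Example (illustrative sketch): a client that must keep the bus awake
 * across a transaction brackets it with the prevent/allow calls on a named
 * runtime-PM lock. Extracting the struct hif_pm_runtime_lock pointer from
 * the qdf_runtime_lock_t initialized by hif_runtime_lock_init() is wrapped
 * by the QDF layer and left out here; "rpm_lock" is a placeholder for that
 * pointer.
 *
 *	hif_pm_runtime_prevent_suspend(hif_ctx, rpm_lock);
 *	// ... bus transactions that must not race with runtime suspend ...
 *	hif_pm_runtime_allow_suspend(hif_ctx, rpm_lock);
 *
 * For a bounded operation, hif_pm_runtime_prevent_suspend_timeout() can be
 * used instead, so the vote expires after the given delay even if the
 * matching allow call is missed.
 */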

void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
				 bool is_packet_log_enabled);
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);

void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);

#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API returns the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API returns the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
		return true;
	else
		return false;
}
#endif

int hif_bus_resume(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);

#ifdef FEATURE_RUNTIME_PM
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif

int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						    scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx,
			   enum hif_target_status status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len,
			  uint32_t sendhead);
int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		    uint32_t transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
		  uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);

#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
				 uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
			 int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);

enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};

typedef uint32_t (*ext_intr_handler)(void *, uint32_t);

uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
				uint32_t numirq, uint32_t irq[],
				ext_intr_handler handler,
				void *cb_ctx, const char *context_name,
				enum hif_exec_type type, uint32_t scale);
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
			       const char *context_name);
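
/*
 * Example (illustrative sketch): grouping a set of integrated-chip irqs
 * (enum hif_ic_irq above) into one NAPI-serviced execution context.
 * Treating the handler's second argument as a polling budget and its return
 * value as work done is an assumption here, as are the datapath handler,
 * dp_ctx and the use of QCA_NAPI_DEF_SCALE for the scale argument; the
 * ordering of register followed by configure is also assumed.
 *
 *	static uint32_t dp_service_group(void *cb_ctx, uint32_t budget)
 *	{
 *		// drain the rings owned by this group, up to budget
 *		return dp_process_rings(cb_ctx, budget);
 *	}
 *
 *	uint32_t irqs[] = { wbm2host_tx_completions_ring1,
 *			    reo2host_destination_ring1 };
 *
 *	hif_register_ext_group(hif_ctx, sizeof(irqs) / sizeof(irqs[0]), irqs,
 *			       dp_service_group, dp_ctx, "dp_group_0",
 *			       HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE);
 *	hif_configure_ext_group_interrupts(hif_ctx);
 */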

void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks);

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

#ifdef __cplusplus
}
#endif

void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv);

#ifndef CONFIG_WIN
#ifndef HIF_CE_DEBUG_DATA_BUF
#define HIF_CE_DEBUG_DATA_BUF 0
#endif
#endif

/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
 * checked here in addition to HIF_CE_DEBUG_DATA_BUF
 */
#if HIF_CE_DEBUG_DATA_BUF
ssize_t hif_dump_desc_trace_buf(struct device *dev,
				struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
				       const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
			    const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores CE service max yield time in the hif context based
 * on the ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores CE service max rx ind flush in the hif context based
 * on the ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush);

#endif /* _HIF_H_ */