/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _HIF_H_
#define _HIF_H_

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Header files */
#include <qdf_status.h>
#include "qdf_nbuf.h"
#include "qdf_lro.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
#include <linux/pci.h>
#endif /* HIF_PCI */
#ifdef HIF_USB
#include <linux/usb.h>
#endif /* HIF_USB */
#ifdef IPA_OFFLOAD
#include <linux/ipa.h>
#endif

#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
typedef void __iomem *A_target_id_t;
typedef void *hif_handle_t;

#define HIF_TYPE_AR6002 2
#define HIF_TYPE_AR6003 3
#define HIF_TYPE_AR6004 5
#define HIF_TYPE_AR9888 6
#define HIF_TYPE_AR6320 7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_IPQ4019 13
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16
#define HIF_TYPE_QCN7605 17
#define HIF_TYPE_QCA6390 18
#define HIF_TYPE_QCA8074V2 19
#define HIF_TYPE_QCA6018 20

#ifdef IPA_OFFLOAD
#define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE 37
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32
#endif
/**
 * enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
struct CE_state;
#define CE_COUNT_MAX 12
#define HIF_MAX_GRP_IRQ 16

#ifdef CONFIG_WIN
#define HIF_MAX_GROUP 12
#else
#define HIF_MAX_GROUP 8
#endif

#ifdef CONFIG_SLUB_DEBUG_ON
#ifndef CONFIG_WIN
#define HIF_CONFIG_SLUB_DEBUG_ON
#endif
#endif

#ifndef NAPI_YIELD_BUDGET_BASED
#ifdef HIF_CONFIG_SLUB_DEBUG_ON
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else
#ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
#endif
#endif /* SLUB_DEBUG_ON */
#else /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */

#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE \
	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)

/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4
/**
 * qca_napi_stat - stats structure for execution contexts
 * @napi_schedules - number of times the schedule function is called
 * @napi_polls - number of times the execution context runs
 * @napi_completes - number of times that the generating interrupt is reenabled
 * @napi_workdone - cumulative of all work done reported by handler
 * @cpu_corrected - incremented when execution context runs on a different core
 *		    than the one that its irq is affined to.
 * @napi_budget_uses - histogram of work done per execution run
 * @time_limit_reached - count of yields due to time limit thresholds
 * @rxpkt_thresh_reached - count of yields due to a work limit
 *
 * needs to be renamed
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
};
/**
 * per NAPI instance data structure
 * This data structure holds the per-instance NAPI state.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device netdev; /* dummy net_dev */
	void *hif_ctx;
	struct napi_struct napi;
	uint8_t scale; /* currently same on all instances */
	uint8_t id;
	uint8_t cpu;
	int irq;
	cpumask_t cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct rx_thread_napi;
	struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t lro_ctx;
};
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: state of this core (up/down/uninitialized)
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all cores of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at
 *            same for all cpus of the same core.
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int core_id;
	int cluster_id;
	cpumask_t core_mask;
	cpumask_t thread_mask;
	unsigned int max_freq;
	uint32_t napis;
	uint32_t execs;
	int cluster_nxt; /* index, not pointer */
};
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: index of the head of the little-cluster core list
 * @bigcl_head: index of the head of the big-cluster core list
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 */
struct qca_napi_data {
	struct hif_softc *hif_softc;
	qdf_spinlock_t lock;
	uint32_t state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t ce_map;
	uint32_t exec_map;
	uint32_t user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu napi_cpu[NR_CPUS];
	int lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t flags;
};
/**
 * struct hif_config_info - Place Holder for hif configuration
 * @enable_self_recovery: Self Recovery
 *
 * Structure for holding hif ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	bool enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
};
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};

struct hif_opaque_softc {
};
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR
 *                        platform-specific measures to completely power-off
 *                        the module and associated hardware (i.e. cut power
 *                        supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get mbox block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get mbox block address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		   uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the dsr handler
 *           note: rw_compl_handler is provided the context
 *           passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler: DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS (*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS (*dsr_handler)(void *context);
};
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state is Load/Unload in progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};
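
/*
 * Example (illustrative sketch, not part of this API): a driver layer
 * populating struct hif_driver_state_callbacks before handing it to
 * hif_open(), which is declared later in this file. The drv_* names,
 * drv_ctx, qdf_dev and mode are placeholders; only the structure fields,
 * hif_open() and QDF_BUS_TYPE_PCI come from this header and QDF.
 *
 *	static struct hif_driver_state_callbacks drv_cbk = {
 *		.context = &drv_ctx,
 *		.set_recovery_in_progress = drv_set_recovery_in_progress,
 *		.is_recovery_in_progress = drv_is_recovery_in_progress,
 *		.is_load_unload_in_progress = drv_is_load_unload_in_progress,
 *		.is_driver_unloading = drv_is_driver_unloading,
 *		.is_target_ready = drv_is_target_ready,
 *	};
 *
 *	struct hif_opaque_softc *hif_ctx =
 *		hif_open(qdf_dev, mode, QDF_BUS_TYPE_PCI, &drv_cbk);
 */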
/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
				   * handled atomically by
				   * DiagRead/DiagWrite
				   */

#ifdef WLAN_FEATURE_BMI
/*
 * API to handle HIF-specific BMI message exchanges, this API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
#else /* WLAN_FEATURE_BMI */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
#endif /* WLAN_FEATURE_BMI */
/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx,
			     uint32_t address, uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
			    void *ramdump_base, uint32_t address,
			    uint32_t size);

/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			      uint32_t address, uint8_t *data, int nbytes);
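
/*
 * Example (illustrative sketch): a read-modify-write of a 4-byte target word
 * using the diag APIs above. hif_ctx and reg_addr are placeholder variables;
 * callers must be in a context that can sleep.
 *
 *	uint32_t val;
 *
 *	if (hif_diag_read_access(hif_ctx, reg_addr, &val) == QDF_STATUS_SUCCESS)
 *		hif_diag_write_access(hif_ctx, reg_addr, val | 0x1);
 */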
typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler, void *context);
#else
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
#endif
/*
 * Enable/disable CDC max performance workaround
 * For max performance set this to 0
 * To allow SoC to enter sleep set this to 1
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0

void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);
/**
 * @brief List of callbacks - filled in by HTC.
 */
struct hif_msg_callbacks {
	void *Context; /**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint32_t transferID,
					  uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
};
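
/*
 * Example (illustrative sketch): HTC filling in a struct hif_msg_callbacks
 * and registering it via hif_post_init(), which is declared later in this
 * file. The htc_* handler names and htc_ctx are placeholders.
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = htc_ctx,
 *		.txCompletionHandler = htc_tx_completion_handler,
 *		.rxCompletionHandler = htc_rx_completion_handler,
 *		.txResourceAvailHandler = htc_tx_resource_avail_handler,
 *		.fwEventHandler = htc_fw_event_handler,
 *	};
 *
 *	hif_post_init(hif_ctx, htc_ctx, &cbs);
 */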
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0, /* target connected */
	TARGET_STATUS_RESET, /* target got reset */
	TARGET_STATUS_EJECT, /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};

/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
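
/*
 * Example (illustrative sketch, not part of this API): composing a data_attr
 * word for hif_send_head(), which is declared later in this file. The bit
 * positions come from the macros above; hif_ctx, pipe_id, transfer_id,
 * nbytes, nbuf and hdr_offset are placeholder variables.
 *
 *	uint32_t data_attr = 0;
 *
 *	HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(data_attr, 1);
 *	HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(data_attr, hdr_offset);
 *	hif_send_head(hif_ctx, pipe_id, transfer_id, nbytes, nbuf, data_attr);
 */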
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index; /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index; /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};

#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */
struct hif_bus_id;

void hif_claim_device(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
			       int opcode, void *config, uint32_t config_len);
void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
		   struct hif_msg_callbacks *callbacks);
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
void hif_stop(struct hif_opaque_softc *hif_ctx);
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start);
QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			 uint32_t transferID, uint32_t nbytes,
			 qdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			     int force);
void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
			  uint8_t *DLPipe);
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled);
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
			  bool wait_for_it);
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);

#ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);

#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - Register the offld flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: Flush callback is either ol_flush, in case of
 *			 rx_thread, or the GRO/LRO flush when RxThread is
 *			 not enabled. Called with the corresponding context
 *			 for flush.
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *ol_ctx));

/**
 * hif_offld_flush_cb_deregister() - deregister the offld flush callback
 * @scn: HIF opaque context
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint);
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk);
void hif_close(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev, const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type);
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
void hif_display_stats(struct hif_opaque_softc *hif_ctx);
void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
#ifdef FEATURE_RUNTIME_PM
struct hif_pm_runtime_lock;
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
					   struct hif_pm_runtime_lock *lock,
					   unsigned int delay);
#else
struct hif_pm_runtime_lock {
	const char *name;
};

static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
static inline void
hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx) {}
static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
					const char *name)
{ return 0; }
static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock) {}
static inline int
hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
			       struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
			     struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
				       struct hif_pm_runtime_lock *lock,
				       unsigned int delay)
{ return 0; }
#endif
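
/*
 * Example (illustrative sketch): holding off runtime suspend around a bus
 * transaction. "prevent_lock" stands for whatever struct hif_pm_runtime_lock
 * pointer the caller obtained through hif_runtime_lock_init() and the QDF
 * runtime-lock wrapper; the exact plumbing is QDF-specific and not shown.
 *
 *	hif_pm_runtime_prevent_suspend(hif_ctx, prevent_lock);
 *	... perform work that requires the bus to stay awake ...
 *	hif_pm_runtime_allow_suspend(hif_ctx, prevent_lock);
 *
 * Alternatively, hif_pm_runtime_get()/hif_pm_runtime_put() take and release a
 * simple reference on the runtime PM state.
 */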
void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
				 bool is_packet_log_enabled);
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);

void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API returns the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API returns the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
		return true;
	else
		return false;
}
#endif
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);

#ifdef FEATURE_RUNTIME_PM
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(
					struct hif_opaque_softc *scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx,
			   enum hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len,
			  uint32_t sendhead);
int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		    uint32_t transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
		  uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(
				struct hif_opaque_softc *osc,
				struct hif_pipe_addl_info *hif_info,
				uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
				 uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
			 int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);

enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};
typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
				uint32_t numirq, uint32_t irq[],
				ext_intr_handler handler,
				void *cb_ctx, const char *context_name,
				enum hif_exec_type type, uint32_t scale);
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
			       const char *context_name);
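
/*
 * Example (illustrative sketch): registering a group of datapath interrupts
 * as one NAPI-polled execution context. The irq ids come from enum hif_ic_irq
 * above; dp_handler, dp_ctx and the group name are placeholders for a handler
 * matching the ext_intr_handler signature and its context.
 *
 *	uint32_t irqs[] = { reo2host_destination_ring1,
 *			    wbm2host_tx_completions_ring1 };
 *
 *	hif_register_ext_group(hif_ctx, ARRAY_SIZE(irqs), irqs, dp_handler,
 *			       dp_ctx, "dp_intr0", HIF_EXEC_NAPI_TYPE,
 *			       QCA_NAPI_DEF_SCALE_BIN_SHIFT);
 */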
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks);

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

/* hif_clear_napi_stats() - clears the NAPI latency stats
 * @hif_ctx - the HIF context whose NAPI stats are to be cleared
 *
 * Return: None
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
#ifdef __cplusplus
}
#endif

void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx - the HIF context to assign the callback to
 * @callback - the callback to assign
 * @priv - the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv);
/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
ssize_t hif_dump_desc_trace_buf(struct device *dev,
				struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
				       const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
			    const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores CE service max yield time in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores CE service max rx ind flush in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush);
#ifdef OL_ATH_SMART_LOGGING
/*
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handler
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */

#endif /* _HIF_H_ */