/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _HIF_H_
#define _HIF_H_

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Header files */
#include <qdf_status.h>
#include "qdf_nbuf.h"
#include "qdf_lro.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
#include <linux/pci.h>
#endif /* HIF_PCI */
#ifdef HIF_USB
#include <linux/usb.h>
#endif /* HIF_USB */
#ifdef IPA_OFFLOAD
#include <linux/ipa.h>
#endif

#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
typedef void __iomem *A_target_id_t;
typedef void *hif_handle_t;

#define HIF_TYPE_AR6002   2
#define HIF_TYPE_AR6003   3
#define HIF_TYPE_AR6004   5
#define HIF_TYPE_AR9888   6
#define HIF_TYPE_AR6320   7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B   11
#define HIF_TYPE_QCA9984  12
#define HIF_TYPE_IPQ4019  13
#define HIF_TYPE_QCA9888  14
#define HIF_TYPE_QCA8074  15
#define HIF_TYPE_QCA6290  16

#ifdef IPA_OFFLOAD
#define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE 37
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3     32
#endif
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 18,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
struct CE_state;
#define CE_COUNT_MAX 12
#define HIF_MAX_GRP_IRQ 16
#define HIF_MAX_GROUP 8

#ifdef CONFIG_SLUB_DEBUG_ON
#ifndef CONFIG_WIN
#define HIF_CONFIG_SLUB_DEBUG_ON
#endif
#endif

#ifndef NAPI_YIELD_BUDGET_BASED
#ifdef HIF_CONFIG_SLUB_DEBUG_ON
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else /* PERF build */
#ifdef CONFIG_WIN
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 1
#else
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
#endif /* CONFIG_WIN */
#endif /* SLUB_DEBUG_ON */
#else /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */

#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE \
	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
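
/*
 * Worked example (illustrative only, not a separate build-time constant):
 * on a perf build without CONFIG_WIN, HIF_CONFIG_SLUB_DEBUG_ON or
 * NAPI_YIELD_BUDGET_BASED, QCA_NAPI_DEF_SCALE_BIN_SHIFT is 4, so
 * QCA_NAPI_DEF_SCALE = (1 << 4) = 16 and
 * HIF_NAPI_MAX_RECEIVES = 64 * 16 = 1024 frames per NAPI invocation.
 */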
/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4
/**
 * qca_napi_stat - stats structure for execution contexts
 * @napi_schedules - number of times the schedule function is called
 * @napi_polls - number of times the execution context runs
 * @napi_completes - number of times that the generating interrupt is reenabled
 * @napi_workdone - cumulative of all work done reported by handler
 * @cpu_corrected - incremented when execution context runs on a different core
 *		    than the one that its irq is affined to.
 * @napi_budget_uses - histogram of work done per execution run
 * @time_limit_reached - count of yields due to time limit thresholds
 * @rxpkt_thresh_reached - count of yields due to a work limit
 *
 * needs to be renamed
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
};
/**
 * struct qca_napi_info - per NAPI instance data structure
 *
 * This data structure holds the per NAPI instance state.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device netdev; /* dummy net_dev */
	void *hif_ctx;
	struct napi_struct napi;
	uint8_t scale; /* currently same on all instances */
	uint8_t id;
	uint8_t cpu;
	int irq;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct rx_thread_napi;
	struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t lro_ctx;
};
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: up/down state of this core
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at
 *	      same for all cpus of the same core.
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int core_id;
	int cluster_id;
	cpumask_t core_mask;
	cpumask_t thread_mask;
	unsigned int max_freq;
	uint32_t napis;
	uint32_t execs;
	int cluster_nxt; /* index, not pointer */
};
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head:
 * @bigcl_head:
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 */
struct qca_napi_data {
	struct hif_softc *hif_softc;
	qdf_spinlock_t lock;
	uint32_t state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t ce_map;
	uint32_t exec_map;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu napi_cpu[NR_CPUS];
	int lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t flags;
};
/**
 * struct hif_config_info - Place Holder for hif configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable runtime PM
 * @runtime_pm_delay: Runtime PM delay
 *
 * Structure for holding hif ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	bool enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
};
/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: Hardware name string
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};

struct hif_opaque_softc {
};
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *			   minimize power
 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR
 *			  platform-specific measures to completely power-off
 *			  the module and associated hardware (i.e. cut power
 *			  supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};
/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_MBOX_BLOCK_SIZE: get mbox block size
 * @HIF_DEVICE_GET_MBOX_ADDR: get mbox block address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_MBOX_BLOCK_SIZE,
	HIF_DEVICE_GET_MBOX_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		   uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the dsrhandler
 *	     note : rwCompletionHandler is provided the context
 *	     passed to hif_read_write
 * @rwCompletionHandler: Read / write completion handler
 * @dsrHandler: DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS (*rwCompletionHandler)(void *rwContext, QDF_STATUS status);
	QDF_STATUS (*dsrHandler)(void *context);
};
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};
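
/*
 * Illustrative sketch (not part of this header's API): how a caller might
 * populate these callbacks and hand them to hif_open() (declared later in
 * this header). The hdd_* helpers, hdd_ctx, qdf_ctx and mode values are
 * hypothetical placeholders; the bus type is assumed to be PCI here.
 *
 *	static struct hif_driver_state_callbacks cbk = {
 *		.context = hdd_ctx,
 *		.set_recovery_in_progress = hdd_set_recovery_in_progress,
 *		.is_recovery_in_progress = hdd_is_recovery_in_progress,
 *		.is_load_unload_in_progress = hdd_is_load_unload_in_progress,
 *		.is_driver_unloading = hdd_is_driver_unloading,
 *		.is_target_ready = hdd_is_target_ready,
 *	};
 *	struct hif_opaque_softc *hif_ctx =
 *		hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk);
 */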
/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
				   * handled atomically by
				   * DiagRead/DiagWrite
				   */
/*
 * API to handle HIF-specific BMI message exchanges. This API is synchronous
 * and only allowed to be called from a context that can block (sleep).
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
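
/*
 * Illustrative sketch (assumptions noted): a blocking BMI exchange where the
 * caller already owns DMA-able command/response addresses (cmd_dma/rsp_dma)
 * and backing buffers (cmd_buf/rsp_buf); those names are hypothetical.
 *
 *	uint32_t rsp_len = sizeof(rsp_buf);
 *	QDF_STATUS status;
 *
 *	status = hif_exchange_bmi_msg(hif_ctx, cmd_dma, rsp_dma,
 *				      cmd_buf, cmd_len,
 *				      rsp_buf, &rsp_len,
 *				      HIF_BMI_EXCHANGE_NO_TIMEOUT);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 */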
void hif_register_bmi_callbacks(struct hif_softc *hif_sc);

/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx,
			     uint32_t address, uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
			    void *ramdump_base, uint32_t address,
			    uint32_t size);
/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			      uint32_t address, uint8_t *data, int nbytes);
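
/*
 * Illustrative sketch (assumptions noted): read-modify-write of a single
 * 4-byte target word through the diag window from a sleepable context.
 * REG_ADDR is a hypothetical target register address.
 *
 *	uint32_t val;
 *
 *	if (QDF_IS_STATUS_SUCCESS(hif_diag_read_access(hif_ctx, REG_ADDR,
 *						       &val)))
 *		hif_diag_write_access(hif_ctx, REG_ADDR, val | 0x1);
 */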
typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler, void *context);
#else
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
#endif
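
/*
 * Illustrative sketch (assumptions noted): registering a fastpath message
 * handler after enabling fastpath mode. my_fastpath_rx() and my_ctx are
 * hypothetical; when WLAN_FEATURE_FASTPATH is not compiled in, the
 * registration above is a stub that simply reports failure.
 *
 *	static void my_fastpath_rx(void *ctx, qdf_nbuf_t *bufs, uint32_t num)
 *	{
 *		... process the message buffers ...
 *	}
 *
 *	hif_enable_fastpath(hif_ctx);
 *	hif_ce_fastpath_cb_register(hif_ctx, my_fastpath_rx, my_ctx);
 */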
/*
 * Enable/disable CDC max performance workaround
 * For max-performance set this to 0
 * To allow SoC to enter sleep set this to 1
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);
/**
 * @brief List of callbacks - filled in by HTC.
 */
struct hif_msg_callbacks {
	void *Context; /**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint32_t transferID,
					  uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
};
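
/*
 * Illustrative sketch (assumptions noted): HTC fills in this callback table
 * and installs it with hif_post_init() (declared later in this header).
 * The htc_* handlers and htc_handle are hypothetical placeholders.
 *
 *	static struct hif_msg_callbacks htc_cbs = {
 *		.Context = htc_handle,
 *		.txCompletionHandler = htc_tx_completion_handler,
 *		.rxCompletionHandler = htc_rx_completion_handler,
 *		.txResourceAvailHandler = htc_tx_resource_avail_handler,
 *		.fwEventHandler = htc_fw_event_handler,
 *	};
 *
 *	hif_post_init(hif_ctx, htc_handle, &htc_cbs);
 */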
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0, /* target connected */
	TARGET_STATUS_RESET,         /* target got reset */
	TARGET_STATUS_EJECT,         /* target got ejected */
	TARGET_STATUS_SUSPEND        /* target got suspended */
};
/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *				  + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
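
/*
 * Illustrative sketch (assumptions noted): building a data_attr word with
 * these setters before handing a frame to hif_send_head() (declared later
 * in this header). The pipe_id, transfer_id and nbuf values are hypothetical.
 *
 *	uint32_t data_attr = 0;
 *
 *	HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(data_attr, 1);
 *	HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(data_attr, 0);
 *	hif_send_head(hif_ctx, pipe_id, transfer_id,
 *		      qdf_nbuf_len(nbuf), nbuf, data_attr);
 */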
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */
struct hif_bus_id;

void hif_claim_device(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
			       int opcode, void *config, uint32_t config_len);
void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
		   struct hif_msg_callbacks *callbacks);
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
void hif_stop(struct hif_opaque_softc *hif_ctx);
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start);
QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			 uint32_t transferID, uint32_t nbytes,
			 qdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			     int force);
void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
			  uint8_t *DLPipe);
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled);
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
			  bool wait_for_it);
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - Register the offld flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: Flush callback, either ol_flush in case of rx_thread
 *			 or the GRO/LRO flush when RxThread is not enabled.
 *			 Called with the corresponding context for flush.
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *ol_ctx));

/**
 * hif_offld_flush_cb_deregister() - deregister the offld flush callback
 * @scn: HIF opaque context
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
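
/*
 * Illustrative sketch (assumptions noted): registering a GRO/LRO flush
 * handler while receive offload is active and removing it on teardown.
 * my_gro_flush() is a hypothetical callback.
 *
 *	static void my_gro_flush(void *rx_ctx)
 *	{
 *		... flush pending aggregated rx for rx_ctx ...
 *	}
 *
 *	hif_offld_flush_cb_register(scn, my_gro_flush);
 *	... receive path runs ...
 *	hif_offld_flush_cb_deregister(scn);
 */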
void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint);
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx, uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk);
void hif_close(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev, const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type);
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
void hif_display_stats(struct hif_opaque_softc *hif_ctx);
void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
#ifdef FEATURE_RUNTIME_PM
struct hif_pm_runtime_lock;

void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
					   struct hif_pm_runtime_lock *lock,
					   unsigned int delay);
#else
struct hif_pm_runtime_lock {
	const char *name;
};

static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
static inline void
hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx) {}
static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
					const char *name)
{ return 0; }
static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock) {}
static inline int
hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
			       struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
			     struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
				       struct hif_pm_runtime_lock *lock,
				       unsigned int delay)
{ return 0; }
#endif
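
/*
 * Illustrative sketch (assumptions noted): pinning the bus awake around a
 * transaction. "lock" stands for a struct hif_pm_runtime_lock pointer set up
 * through hif_runtime_lock_init(); how that pointer is obtained from the
 * qdf_runtime_lock_t is elided here. With FEATURE_RUNTIME_PM disabled these
 * calls compile to the no-op stubs above.
 *
 *	hif_pm_runtime_prevent_suspend(hif_ctx, lock);
 *	... perform the bus transaction ...
 *	hif_pm_runtime_allow_suspend(hif_ctx, lock);
 */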
void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
				 bool is_packet_log_enabled);
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);

void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API returns the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API returns the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
		return true;
	else
		return false;
}
#endif
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
#ifdef FEATURE_RUNTIME_PM
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif

int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx,
			   enum hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len,
			  uint32_t sendhead);
int hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
		    uint32_t transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
		  uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
						  struct hif_pipe_addl_info *hif_info,
						  uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
				 uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
			 int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);

enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};
typedef uint32_t (*ext_intr_handler)(void *, uint32_t);

uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
				uint32_t numirq, uint32_t irq[],
				ext_intr_handler handler,
				void *cb_ctx, const char *context_name,
				enum hif_exec_type type, uint32_t scale);
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
			       const char *context_name);
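
/*
 * Illustrative sketch (assumptions noted): an external module such as the
 * datapath grouping a few hif_ic_irq lines under one NAPI-style execution
 * context. dp_rx_poll(), dp_ctx, the "dp_rx" name and the chosen irq lines
 * are hypothetical.
 *
 *	uint32_t irqs[] = { reo2host_destination_ring1,
 *			    wbm2host_rx_release };
 *
 *	hif_register_ext_group(hif_ctx, ARRAY_SIZE(irqs), irqs,
 *			       dp_rx_poll, dp_ctx, "dp_rx",
 *			       HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE);
 *	hif_configure_ext_group_interrupts(hif_ctx);
 *	... run until teardown ...
 *	hif_deregister_exec_group(hif_ctx, "dp_rx");
 */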
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks);
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
#ifdef __cplusplus
}
#endif
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx - the HIF context to assign the callback to
 * @callback - the callback to assign
 * @priv - the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv);

#ifndef CONFIG_WIN
#ifndef HIF_CE_DEBUG_DATA_BUF
#define HIF_CE_DEBUG_DATA_BUF 0
#endif
#endif
/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
 * checked here
 */
#if HIF_CE_DEBUG_DATA_BUF
ssize_t hif_dump_desc_trace_buf(struct device *dev,
				struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
				       const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
			    const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* Note: for MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores the CE service max yield time in the hif context based
 * on the ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time);
/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores CE service max rx ind flush in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush);
#endif /* _HIF_H_ */