/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _HIF_H_
#define _HIF_H_

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Header files */
#include <qdf_status.h>
#include "qdf_nbuf.h"
#include "qdf_lro.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
#include <linux/pci.h>
#endif /* HIF_PCI */
#ifdef HIF_USB
#include <linux/usb.h>
#endif /* HIF_USB */
#ifdef IPA_OFFLOAD
#include <linux/ipa.h>
#endif
#include "cfg_ucfg_api.h"

#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1

typedef void __iomem *A_target_id_t;
typedef void *hif_handle_t;

#define HIF_TYPE_AR6002 2
#define HIF_TYPE_AR6003 3
#define HIF_TYPE_AR6004 5
#define HIF_TYPE_AR9888 6
#define HIF_TYPE_AR6320 7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_IPQ4019 13
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16
#define HIF_TYPE_QCN7605 17
#define HIF_TYPE_QCA6390 18
#define HIF_TYPE_QCA8074V2 19
#define HIF_TYPE_QCA6018 20
#define HIF_TYPE_QCN9000 21
#define HIF_TYPE_QCA6490 22
#define HIF_TYPE_QCA6750 23
#define HIF_TYPE_QCA5018 24

#ifdef IPA_OFFLOAD
#define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE 37
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32
#endif
/**
 * enum hif_ic_irq - integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
    host2wbm_desc_feed = 16,
    host2reo_re_injection,
    host2reo_command,
    host2rxdma_monitor_ring3,
    host2rxdma_monitor_ring2,
    host2rxdma_monitor_ring1,
    reo2host_exception,
    wbm2host_rx_release,
    reo2host_status,
    reo2host_destination_ring4,
    reo2host_destination_ring3,
    reo2host_destination_ring2,
    reo2host_destination_ring1,
    rxdma2host_monitor_destination_mac3,
    rxdma2host_monitor_destination_mac2,
    rxdma2host_monitor_destination_mac1,
    ppdu_end_interrupts_mac3,
    ppdu_end_interrupts_mac2,
    ppdu_end_interrupts_mac1,
    rxdma2host_monitor_status_ring_mac3,
    rxdma2host_monitor_status_ring_mac2,
    rxdma2host_monitor_status_ring_mac1,
    host2rxdma_host_buf_ring_mac3,
    host2rxdma_host_buf_ring_mac2,
    host2rxdma_host_buf_ring_mac1,
    rxdma2host_destination_ring_mac3,
    rxdma2host_destination_ring_mac2,
    rxdma2host_destination_ring_mac1,
    host2tcl_input_ring4,
    host2tcl_input_ring3,
    host2tcl_input_ring2,
    host2tcl_input_ring1,
    wbm2host_tx_completions_ring3,
    wbm2host_tx_completions_ring2,
    wbm2host_tx_completions_ring1,
    tcl2host_status_ring,
};

struct CE_state;

#define CE_COUNT_MAX 12
#define HIF_MAX_GRP_IRQ 16

#ifndef HIF_MAX_GROUP
#define HIF_MAX_GROUP 7
#endif

#ifndef NAPI_YIELD_BUDGET_BASED
#ifdef HIF_CONFIG_SLUB_DEBUG_ON
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 3
#else
#ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
#endif
#endif /* SLUB_DEBUG_ON */
#else /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */

#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE \
    (1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
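/*
 * Worked example (editorial note; the values follow directly from the macros
 * above): with the default bin shift of 4, QCA_NAPI_DEF_SCALE = 1 << 4 = 16,
 * so HIF_NAPI_MAX_RECEIVES = 64 * 16 = 1024. With HIF_CONFIG_SLUB_DEBUG_ON
 * (shift 3) the cap drops to 64 * 8 = 512, and with NAPI_YIELD_BUDGET_BASED
 * (shift 2) to 64 * 4 = 256.
 */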
/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is re-enabled
 * @napi_workdone: cumulative work done reported by the handler
 * @cpu_corrected: incremented when the execution context runs on a different
 *                 core than the one its irq is affined to
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: maximum time spent in a single poll
 * @poll_time_buckets: histogram of poll times for the napi
 */
struct qca_napi_stat {
    uint32_t napi_schedules;
    uint32_t napi_polls;
    uint32_t napi_completes;
    uint32_t napi_workdone;
    uint32_t cpu_corrected;
    uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
    uint32_t time_limit_reached;
    uint32_t rxpkt_thresh_reached;
    unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
    uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
/**
 * struct qca_napi_info - per NAPI instance data structure
 * This data structure holds the state of each NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
    struct net_device netdev; /* dummy net_dev */
    void *hif_ctx;
    struct napi_struct napi;
    uint8_t scale; /* currently same on all instances */
    uint8_t id;
    uint8_t cpu;
    int irq;
    cpumask_t cpumask;
    struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
    /* will only be present for data rx CE's */
    void (*offld_flush_cb)(void *);
    struct napi_struct rx_thread_napi;
    struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
    qdf_lro_ctx_t lro_ctx;
};

enum qca_napi_tput_state {
    QCA_NAPI_TPUT_UNINITIALIZED,
    QCA_NAPI_TPUT_LO,
    QCA_NAPI_TPUT_HI
};

enum qca_napi_cpu_state {
    QCA_NAPI_CPU_UNINITIALIZED,
    QCA_NAPI_CPU_DOWN,
    QCA_NAPI_CPU_UP
};
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: state of this cpu entry (uninitialized/up/down)
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all cores of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at;
 *            same for all cpus of the same core
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by the hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
    enum qca_napi_cpu_state state;
    int core_id;
    int cluster_id;
    cpumask_t core_mask;
    cpumask_t thread_mask;
    unsigned int max_freq;
    uint32_t napis;
    uint32_t execs;
    int cluster_nxt; /* index, not pointer */
};
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bitmap indicating which ce's have napis running
 * @exec_map: bitmap of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config
 * @napis: per-CE napi instance pointers, indexed by copy engine id
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: head of the little-cluster core list
 * @bigcl_head: head of the big-cluster core list
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 */
struct qca_napi_data {
    struct hif_softc *hif_softc;
    qdf_spinlock_t lock;
    uint32_t state;

    /* bitmap of created/registered NAPI instances, indexed by pipe_id,
     * not used by clients (clients use an id returned by create)
     */
    uint32_t ce_map;
    uint32_t exec_map;
    uint32_t user_cpu_affin_mask;
    struct qca_napi_info *napis[CE_COUNT_MAX];
    struct qca_napi_cpu napi_cpu[NR_CPUS];
    int lilcl_head, bigcl_head;
    enum qca_napi_tput_state napi_mode;
    struct qdf_cpuhp_handler *cpuhp_handler;
    uint8_t flags;
};
/**
 * struct hif_config_info - placeholder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM
 * @runtime_pm_delay: Runtime PM Delay
 * @rx_softirq_max_yield_duration_ns: Max yield duration for RX softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
    bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
    uint8_t enable_runtime_pm;
    u_int32_t runtime_pm_delay;
#endif
    uint64_t rx_softirq_max_yield_duration_ns;
};

/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name
 *
 * Structure to hold target information.
 */
struct hif_target_info {
    uint32_t target_version;
    uint32_t target_type;
    uint32_t target_revision;
    uint32_t soc_version;
    char *hw_name;
};

struct hif_opaque_softc {
};
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 */
enum hif_event_type {
    HIF_EVENT_IRQ_TRIGGER,
    HIF_EVENT_BH_SCHED,
    HIF_EVENT_SRNG_ACCESS_START,
    HIF_EVENT_SRNG_ACCESS_END,
};

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/* HIF_EVENT_HIST_MAX should always be power of 2 */
#define HIF_EVENT_HIST_MAX 512
#define HIF_NUM_INT_CONTEXTS HIF_MAX_GROUP
#define HIF_EVENT_HIST_DISABLE_MASK 0

/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 */
struct hif_event_record {
    uint8_t hal_ring_id;
    uint32_t hp;
    uint32_t tp;
    int cpu_id;
    uint64_t timestamp;
    enum hif_event_type type;
};

/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event
 * @event: event entry
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
    qdf_atomic_t index;
    struct hif_event_record event[HIF_EVENT_HIST_MAX];
};

/**
 * hif_hist_record_event() - Record one datapath event in history
 * @hif_ctx: HIF opaque context
 * @event: DP event entry
 * @intr_grp_id: interrupt group ID registered with hif
 *
 * Return: None
 */
void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
                           struct hif_event_record *event,
                           uint8_t intr_grp_id);

/**
 * hif_record_event() - Wrapper function to form and record DP event
 * @hif_ctx: HIF opaque context
 * @intr_grp_id: interrupt group ID registered with hif
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer index of the srng
 * @tp: tail pointer index of the srng
 * @type: type of the event to be logged in history
 *
 * Return: None
 */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
                                    uint8_t intr_grp_id,
                                    uint8_t hal_ring_id,
                                    uint32_t hp,
                                    uint32_t tp,
                                    enum hif_event_type type)
{
    struct hif_event_record event;

    event.hal_ring_id = hal_ring_id;
    event.hp = hp;
    event.tp = tp;
    event.type = type;

    hif_hist_record_event(hif_ctx, &event, intr_grp_id);
}
#else
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
                                    uint8_t intr_grp_id,
                                    uint8_t hal_ring_id,
                                    uint32_t hp,
                                    uint32_t tp,
                                    enum hif_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
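/*
 * Usage sketch (illustrative only, not part of this header): a datapath poll
 * handler would typically bracket its SRNG access with two calls to
 * hif_record_event(). The ring id, head/tail pointer values and the group id
 * below are placeholders supplied by the caller.
 *
 *     hif_record_event(hif_ctx, grp_id, ring_id, hp, tp,
 *                      HIF_EVENT_SRNG_ACCESS_START);
 *     ... reap and process ring entries ...
 *     hif_record_event(hif_ctx, grp_id, ring_id, hp, tp,
 *                      HIF_EVENT_SRNG_ACCESS_END);
 */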
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR
 *                        platform-specific measures to completely power-off
 *                        the module and associated hardware (i.e. cut power
 *                        supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
    HIF_DEVICE_POWER_UP,
    HIF_DEVICE_POWER_DOWN,
    HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 */
enum hif_enable_type {
    HIF_ENABLE_TYPE_PROBE,
    HIF_ENABLE_TYPE_REINIT,
    HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 */
enum hif_disable_type {
    HIF_DISABLE_TYPE_PROBE_ERROR,
    HIF_DISABLE_TYPE_REINIT_ERROR,
    HIF_DISABLE_TYPE_REMOVE,
    HIF_DISABLE_TYPE_SHUTDOWN,
    HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get fifo address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
    HIF_DEVICE_POWER_STATE = 0,
    HIF_DEVICE_GET_BLOCK_SIZE,
    HIF_DEVICE_GET_FIFO_ADDR,
    HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
    HIF_DEVICE_GET_IRQ_PROC_MODE,
    HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
    HIF_DEVICE_POWER_STATE_CHANGE,
    HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
    HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
    HIF_DEVICE_GET_OS_DEVICE,
    HIF_DEVICE_DEBUG_BUS_STATE,
    HIF_BMI_DONE,
    HIF_DEVICE_SET_TARGET_TYPE,
    HIF_DEVICE_SET_HTC_CONTEXT,
    HIF_DEVICE_GET_HTC_CONTEXT,
};

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
struct HID_ACCESS_LOG {
    uint32_t seqnum;
    bool is_write;
    void *addr;
    uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
                   uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the dsr_handler
 *           note: rw_compl_handler is provided the context
 *           passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler: DSR Handler
 */
struct htc_callbacks {
    void *context;
    QDF_STATUS (*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
    QDF_STATUS (*dsr_handler)(void *context);
};
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state is Load/Unload in progress
 * @is_driver_unloading: Query if driver is unloading
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 *
 * This structure provides callback pointers for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
    void *context;
    void (*set_recovery_in_progress)(void *context, uint8_t val);
    bool (*is_recovery_in_progress)(void *context);
    bool (*is_load_unload_in_progress)(void *context);
    bool (*is_driver_unloading)(void *context);
    bool (*is_target_ready)(void *context);
    int (*get_bandwidth_level)(void *context);
};

/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
                                   * handled atomically by
                                   * DiagRead/DiagWrite
                                   */

#ifdef WLAN_FEATURE_BMI
/*
 * API to handle HIF-specific BMI message exchanges. This API is synchronous
 * and may only be called from a context that can block (sleep).
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
                                qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
                                uint8_t *pSendMessage, uint32_t Length,
                                uint8_t *pResponseMessage,
                                uint32_t *pResponseLength, uint32_t TimeoutMS);
void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
#else /* WLAN_FEATURE_BMI */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
    return false;
}
#endif /* WLAN_FEATURE_BMI */
/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
                                uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
                             uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
                            void *ramdump_base, uint32_t address,
                            uint32_t size);

/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
                                 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
                              uint32_t address, uint8_t *data, int nbytes);
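/*
 * Usage sketch (illustrative only): reading and writing a single 4-byte
 * target word from a sleepable context. The address below is a placeholder;
 * real callers derive it from the target memory map.
 *
 *     uint32_t val;
 *
 *     if (QDF_IS_STATUS_SUCCESS(hif_diag_read_access(hif_ctx, addr, &val)))
 *         hif_diag_write_access(hif_ctx, addr, val | 0x1);
 */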
typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
                                fastpath_msg_handler handler, void *context);
#else
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
                                              fastpath_msg_handler handler,
                                              void *context)
{
    return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
    return NULL;
}
#endif
/*
 * Enable/disable CDC max performance workaround
 * For max performance set this to 0
 * To allow SoC to enter sleep set this to 1
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
                             qdf_shared_mem_t **ce_sr,
                             uint32_t *ce_sr_ring_size,
                             qdf_dma_addr_t *ce_reg_paddr);

/**
 * @brief List of callbacks - filled in by HTC.
 */
struct hif_msg_callbacks {
    void *Context;
    /**< context meaningful to HTC */
    QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
                                      uint32_t transferID,
                                      uint32_t toeplitz_hash_result);
    QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
                                      uint8_t pipeID);
    void (*txResourceAvailHandler)(void *context, uint8_t pipe);
    void (*fwEventHandler)(void *context, QDF_STATUS status);
    void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};
enum hif_target_status {
    TARGET_STATUS_CONNECTED = 0, /* target connected */
    TARGET_STATUS_RESET,         /* target got reset */
    TARGET_STATUS_EJECT,         /* target got ejected */
    TARGET_STATUS_SUSPEND        /* target got suspended */
};
/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
    HIF_LOWDESC_CE_CFG = 1,
    HIF_LOWDESC_CE_NO_PKTLOG_CFG
};

#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
    (attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
    (attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
    (attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
    (attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
    (attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
    (attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
    (attr |= (v & 0x01) << 30)
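/*
 * Usage sketch (illustrative only): the setters above OR bit-fields into a
 * caller-owned attribute word, which is then handed to hif_send_head()
 * (declared later in this header) as the data_attr argument. The pipe,
 * transfer id and nbuf below are placeholders.
 *
 *     uint32_t data_attr = 0;
 *
 *     HIF_DATA_ATTR_SET_TX_CLASSIFY(data_attr, 1);
 *     HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(data_attr, 1);
 *     hif_send_head(hif_ctx, pipe_id, transfer_id,
 *                   qdf_nbuf_len(nbuf), nbuf, data_attr);
 */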
struct hif_ul_pipe_info {
    unsigned int nentries;
    unsigned int nentries_mask;
    unsigned int sw_index;
    unsigned int write_index; /* cached copy */
    unsigned int hw_index;    /* cached copy */
    void *base_addr_owner_space; /* Host address space */
    qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_dl_pipe_info {
    unsigned int nentries;
    unsigned int nentries_mask;
    unsigned int sw_index;
    unsigned int write_index; /* cached copy */
    unsigned int hw_index;    /* cached copy */
    void *base_addr_owner_space; /* Host address space */
    qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_pipe_addl_info {
    uint32_t pci_mem;
    uint32_t ctrl_addr;
    struct hif_ul_pipe_info ul_pipe;
    struct hif_dl_pipe_info dl_pipe;
};

#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */

struct hif_bus_id;

void hif_claim_device(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
                               int opcode, void *config, uint32_t config_len);
void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
                   struct hif_msg_callbacks *callbacks);
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
void hif_stop(struct hif_opaque_softc *hif_ctx);
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
                      uint8_t cmd_id, bool start);
QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
                         uint32_t transferID, uint32_t nbytes,
                         qdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
                             int force);
void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
                          uint8_t *DLPipe);
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
                            uint8_t *ul_pipe, uint8_t *dl_pipe,
                            int *ul_is_polled, int *dl_is_polled);
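/*
 * Usage sketch (illustrative only): resolving the copy-engine pipes for an
 * HTC service before posting traffic. The svc_id constant is assumed to come
 * from the HTC service definitions, and a zero return is assumed to indicate
 * success; both are placeholders here, not guarantees of this header.
 *
 *     uint8_t ul_pipe, dl_pipe;
 *     int ul_polled, dl_polled;
 *
 *     if (!hif_map_service_to_pipe(hif_ctx, svc_id, &ul_pipe, &dl_pipe,
 *                                  &ul_polled, &dl_polled))
 *         ... use ul_pipe/dl_pipe with hif_send_head() ...
 */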
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
                          bool wait_for_it);
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
    return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif

void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
                     u32 *revision, const char **target_name);

#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - Register the offld flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: flush callback; this is ol_flush when the RX thread
 *                       is enabled, or the GRO/LRO flush otherwise. It is
 *                       called with the corresponding context.
 *
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
                                 void (offld_flush_handler)(void *ol_ctx));

/**
 * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
 * @scn: HIF opaque context
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
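/*
 * Usage sketch (illustrative only): a hypothetical GRO flush handler being
 * hooked to the data-RX NAPI instances. The handler name and its context
 * argument are placeholders, not part of this API.
 *
 *     static void my_gro_flush(void *rx_ctx)
 *     {
 *         ... flush pending GRO/LRO aggregates for rx_ctx ...
 *     }
 *
 *     hif_offld_flush_cb_register(scn, my_gro_flush);
 *     ...
 *     hif_offld_flush_cb_deregister(scn);
 */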
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_exec_should_yield() - Check if hif napi context should yield
 * @hif_ctx: HIF opaque context
 * @grp_id: grp_id of the napi for which check needs to be done
 *
 * The function uses grp_id to look for NAPI and checks if NAPI needs to
 * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
 * yield decision.
 *
 * Return: true if NAPI needs to yield, else false
 */
bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
#else
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
                                         uint grp_id)
{
    return false;
}
#endif

void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
                                      int htc_htt_tx_endpoint);
/**
 * hif_open() - Create hif handle
 * @qdf_ctx: qdf context
 * @mode: Driver Mode
 * @bus_type: Bus Type
 * @cbk: CDS Callbacks
 * @psoc: psoc object manager
 *
 * API to open HIF Context
 *
 * Return: HIF Opaque Pointer
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
                                  uint32_t mode,
                                  enum qdf_bus_type bus_type,
                                  struct hif_driver_state_callbacks *cbk,
                                  struct wlan_objmgr_psoc *psoc);

void hif_close(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
                      void *bdev, const struct hif_bus_id *bid,
                      enum qdf_bus_type bus_type,
                      enum hif_enable_type type);
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
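/*
 * Usage sketch (illustrative only): the expected bring-up/teardown ordering
 * for the calls above. The qdf_ctx, mode, dev, bdev, bid, cbk and psoc
 * arguments are placeholders owned by the caller; QDF_BUS_TYPE_PCI is assumed
 * to be the bus enumerator defined in the QDF headers.
 *
 *     struct hif_opaque_softc *hif_ctx;
 *
 *     hif_ctx = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk, psoc);
 *     if (!hif_ctx)
 *         return QDF_STATUS_E_NOMEM;
 *     if (QDF_IS_STATUS_ERROR(hif_enable(hif_ctx, dev, bdev, bid,
 *                                        QDF_BUS_TYPE_PCI,
 *                                        HIF_ENABLE_TYPE_PROBE))) {
 *         hif_close(hif_ctx);
 *         return QDF_STATUS_E_FAILURE;
 *     }
 *     ...
 *     hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
 *     hif_close(hif_ctx);
 */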
#ifdef CE_TASKLET_DEBUG_ENABLE
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
                                 uint8_t value);
#endif
void hif_display_stats(struct hif_opaque_softc *hif_ctx);
void hif_clear_stats(struct hif_opaque_softc *hif_ctx);

#ifdef FEATURE_RUNTIME_PM
struct hif_pm_runtime_lock;
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
                             struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
                                   struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
                                 struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
                                           struct hif_pm_runtime_lock *lock,
                                           unsigned int delay);
bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
                                          int val);
void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx);
#else
struct hif_pm_runtime_lock {
    const char *name;
};

static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
static inline int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int
hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int
hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{}
static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline void
hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {}
static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
                                        const char *name)
{ return 0; }
static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
                        struct hif_pm_runtime_lock *lock) {}
static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
                                                 struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
                                               struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
                                       struct hif_pm_runtime_lock *lock,
                                       unsigned int delay)
{ return 0; }
static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
{ return false; }
static inline int
hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline void
hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
{ }
static inline void
hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {}
static inline int
hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline qdf_time_t
hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
{ return 0; }
#endif
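/*
 * Usage sketch (illustrative only): the usual runtime-PM pattern around bus
 * accesses. This assumes, as with the stubs above, that a zero return from
 * hif_pm_runtime_get() means the link is active; a non-zero return means the
 * get was not granted and the caller must not touch the bus.
 *
 *     if (hif_pm_runtime_get(hif_ctx) == 0) {
 *         ... perform register/DMA work while the link is active ...
 *         hif_pm_runtime_mark_last_busy(hif_ctx);
 *         hif_pm_runtime_put(hif_ctx);
 *     }
 */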
void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
                                 bool is_packet_log_enabled);
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API returns the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
    return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API returns the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
    if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
        return true;
    else
        return false;
}
#endif
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);

#ifdef FEATURE_RUNTIME_PM
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
                     u32 *revision, const char **target_name);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx,
                           enum hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
                         struct hif_config_info *cfg);
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
                          uint32_t transfer_id, u_int32_t len,
                          uint32_t sendhead);
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
                           uint32_t transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
                  uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
                                                  struct hif_pipe_addl_info *hif_info,
                                                  uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
                                 uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
                         int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);

enum hif_exec_type {
    HIF_EXEC_NAPI_TYPE,
    HIF_EXEC_TASKLET_TYPE,
};

typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
/**
 * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
 * @softc: hif opaque context owning the exec context
 * @id: the id of the interrupt context
 *
 * Return: IRQ number of the first (zeroth) IRQ within the interrupt context ID
 *         'id' registered with the OS
 */
int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
                                uint8_t id);

uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
                                uint32_t numirq, uint32_t irq[],
                                ext_intr_handler handler,
                                void *cb_ctx, const char *context_name,
                                enum hif_exec_type type, uint32_t scale);
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
                               const char *context_name);
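/*
 * Usage sketch (illustrative only): registering an external interrupt group
 * serviced in NAPI context. The handler, its context and the irq array are
 * placeholders; a real handler returns the amount of work done so the
 * execution context can budget further polling.
 *
 *     static uint32_t my_grp_handler(void *ctx, uint32_t budget)
 *     {
 *         ... reap up to 'budget' entries, return work done ...
 *     }
 *
 *     uint32_t irqs[] = { irq0, irq1 };
 *
 *     hif_register_ext_group(hif_ctx, 2, irqs, my_grp_handler, my_ctx,
 *                            "my_dp_group", HIF_EXEC_NAPI_TYPE,
 *                            QCA_NAPI_DEF_SCALE);
 *     ...
 *     hif_deregister_exec_group(hif_ctx, "my_dp_group");
 */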
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
                              u_int8_t pipeid,
                              struct hif_msg_callbacks *callbacks);

/**
 * hif_print_napi_stats() - Display HIF NAPI stats
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

/**
 * hif_clear_napi_stats() - Clear the accumulated NAPI latency stats
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
#ifdef __cplusplus
}
#endif
#ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - Function to wake from power collapse
 * @handle: HIF opaque handle
 *
 * Description: API to check if the device is awake before read/write to
 * BAR + 4K registers. If the device is awake, return success. Otherwise,
 * write '1' to PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, which
 * interrupts the device and wakes up the PCI and MHI within 50ms; the
 * device then writes a value to
 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
 * handshake and let the host know the device is awake.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_request(struct hif_opaque_softc *handle);

/**
 * hif_force_wake_release() - API to release/reset the SOC wake register
 * from interrupting the device.
 * @handle: HIF opaque handle
 *
 * Description: API to set the
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
 * to release the interrupt line.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_release(struct hif_opaque_softc *handle);
#else
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
    return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
    return 0;
}
#endif /* FORCE_WAKE */
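/*
 * Usage sketch (illustrative only): bracketing an access to registers above
 * BAR + 4K with a force-wake request/release pair, assuming a zero return
 * from hif_force_wake_request() means the wake handshake completed.
 *
 *     if (hif_force_wake_request(hif_handle))
 *         return QDF_STATUS_E_FAILURE;
 *     ... read/write the BAR + 4K registers ...
 *     if (hif_force_wake_release(hif_handle))
 *         return QDF_STATUS_E_FAILURE;
 */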
#ifdef FEATURE_HAL_DELAYED_REG_WRITE
/**
 * hif_prevent_link_low_power_states() - Prevent from going to low power states
 * @hif: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);

/**
 * hif_allow_link_low_power_states() - Allow link to go to low power states
 * @hif: HIF opaque context
 *
 * Return: None
 */
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
#else
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
    return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
#endif

void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
                               void (*callback)(void *),
                               void *priv);
/*
 * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * here as well
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
ssize_t hif_dump_desc_trace_buf(struct device *dev,
                                struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
                                       const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
                            const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores the CE service max yield time in the hif context based
 * on the ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
                                       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns the CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores the CE service max rx ind flush in the hif context based
 * on the ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
                                         uint8_t ce_service_max_rx_ind_flush);
#ifdef OL_ATH_SMART_LOGGING
/*
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handler
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them into the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
                         uint8_t *buf_init, uint32_t buf_sz,
                         uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */
/*
 * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
 * to hif_opaque_softc handle
 * @hif_handle: hif_softc type
 *
 * Return: hif_opaque_softc type
 */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
    return (struct hif_opaque_softc *)hif_handle;
}

#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase(): Indicate srng initialization phase
 * to avoid force wake as UMAC power collapse is not yet
 * enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return: None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
                         bool init_phase);
#else
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
                         bool init_phase)
{
}
#endif /* FORCE_WAKE */
#endif /* _HIF_H_ */