hif.h

/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _HIF_H_
#define _HIF_H_

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Header files */
#include <qdf_status.h>
#include "qdf_nbuf.h"
#include "qdf_lro.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
#include <linux/pci.h>
#endif /* HIF_PCI */
#ifdef HIF_USB
#include <linux/usb.h>
#endif /* HIF_USB */
#ifdef IPA_OFFLOAD
#include <linux/ipa.h>
#endif
#include "cfg_ucfg_api.h"

#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1

typedef void __iomem *A_target_id_t;
typedef void *hif_handle_t;

#define HIF_TYPE_AR6002 2
#define HIF_TYPE_AR6003 3
#define HIF_TYPE_AR6004 5
#define HIF_TYPE_AR9888 6
#define HIF_TYPE_AR6320 7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_IPQ4019 13
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16
#define HIF_TYPE_QCN7605 17
#define HIF_TYPE_QCA6390 18
#define HIF_TYPE_QCA8074V2 19
#define HIF_TYPE_QCA6018 20
#define HIF_TYPE_QCN9000 21
#define HIF_TYPE_QCA6490 22
#define HIF_TYPE_QCA6750 23

#ifdef IPA_OFFLOAD
#define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE 37
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32
#endif
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
struct CE_state;
#define CE_COUNT_MAX 12
#define HIF_MAX_GRP_IRQ 16

#ifndef HIF_MAX_GROUP
#define HIF_MAX_GROUP 7
#endif

#ifndef NAPI_YIELD_BUDGET_BASED
#ifdef HIF_CONFIG_SLUB_DEBUG_ON
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 3
#else
#ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
#endif
#endif /* SLUB_DEBUG_ON */
#else /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */

#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE \
	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *	than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: maximum poll time recorded for the napi
 * @poll_time_buckets: histogram of poll times for the napi
 *
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
/**
 * per NAPI instance data structure
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device netdev; /* dummy net_dev */
	void *hif_ctx;
	struct napi_struct napi;
	uint8_t scale; /* currently same on all instances */
	uint8_t id;
	uint8_t cpu;
	int irq;
	cpumask_t cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct rx_thread_napi;
	struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t lro_ctx;
};
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: up/down/uninitialized state of this core's entry
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at
 *	same for all cpus of the same core.
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int core_id;
	int cluster_id;
	cpumask_t core_mask;
	cpumask_t thread_mask;
	unsigned int max_freq;
	uint32_t napis;
	uint32_t execs;
	int cluster_nxt; /* index, not pointer */
};
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per-CE napi instance pointers
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: index of the head of the little-cluster core list
 * @bigcl_head: index of the head of the big-cluster core list
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: NAPI feature flags
 */
struct qca_napi_data {
	struct hif_softc *hif_softc;
	qdf_spinlock_t lock;
	uint32_t state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t ce_map;
	uint32_t exec_map;
	uint32_t user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu napi_cpu[NR_CPUS];
	int lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t flags;
};
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM
 * @runtime_pm_delay: Runtime PM Delay
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};

/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};

struct hif_opaque_softc {
};
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
};

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/* HIF_EVENT_HIST_MAX should always be power of 2 */
#define HIF_EVENT_HIST_MAX 512
#define HIF_NUM_INT_CONTEXTS HIF_MAX_GROUP
#define HIF_EVENT_HIST_DISABLE_MASK 0

/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};

/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event
 * @event: event entry
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
	qdf_atomic_t index;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};

/**
 * hif_hist_record_event() - Record one datapath event in history
 * @hif_ctx: HIF opaque context
 * @event: DP event entry
 * @intr_grp_id: interrupt group ID registered with hif
 *
 * Return: None
 */
void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event,
			   uint8_t intr_grp_id);
/**
 * hif_record_event() - Wrapper function to form and record DP event
 * @hif_ctx: HIF opaque context
 * @intr_grp_id: interrupt group ID registered with hif
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer index of the srng
 * @tp: tail pointer index of the srng
 * @type: type of the event to be logged in history
 *
 * Return: None
 */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
	struct hif_event_record event;

	event.hal_ring_id = hal_ring_id;
	event.hp = hp;
	event.tp = tp;
	event.type = type;

	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
}
#else
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
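
/*
 * Usage note (illustrative sketch, not part of the original header): a
 * datapath ring handler could bracket its SRNG access with two calls to
 * hif_record_event(). The ring id, head/tail indices and "grp_id" below
 * are placeholders supplied by the caller.
 *
 *	hif_record_event(hif_ctx, grp_id, ring_id, hp, tp,
 *			 HIF_EVENT_SRNG_ACCESS_START);
 *	... process ring entries ...
 *	hif_record_event(hif_ctx, grp_id, ring_id, hp, tp,
 *			 HIF_EVENT_SRNG_ACCESS_END);
 */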
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR
 *                        platform-specific measures to completely power-off
 *                        the module and associated hardware (i.e. cut power
 *                        supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get block (FIFO) address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		   uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the @dsr_handler
 *           note: @rw_compl_handler is provided the context
 *           passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler: DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS (*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS (*dsr_handler)(void *context);
};
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 *
 * This structure provides callback pointers for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
};
/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
				   * handled atomically by
				   * DiagRead/DiagWrite
				   */

#ifdef WLAN_FEATURE_BMI
/*
 * API to handle HIF-specific BMI message exchanges, this API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
#else /* WLAN_FEATURE_BMI */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
#endif /* WLAN_FEATURE_BMI */
/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
			     uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
			    void *ramdump_base, uint32_t address,
			    uint32_t size);

/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			      uint32_t address, uint8_t *data, int nbytes);
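
/*
 * Usage note (illustrative sketch, not part of the original header): a
 * read-modify-write of a 4-byte target word through the diag interface.
 * "addr" is a placeholder target address; both calls may sleep, so this
 * must run in a context that can block.
 *
 *	uint32_t val;
 *
 *	if (hif_diag_read_access(hif_ctx, addr, &val) == QDF_STATUS_SUCCESS) {
 *		val |= 0x1;
 *		hif_diag_write_access(hif_ctx, addr, val);
 *	}
 */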
typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);

int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler, void *context);
#else
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
#endif
/*
 * Enable/disable CDC max performance workaround
 * For max performance set this to 0
 * To allow SoC to enter sleep set this to 1
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0

void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);
/**
 * @brief List of callbacks - filled in by HTC.
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint32_t transferID,
					  uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};
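
/*
 * Usage note (illustrative sketch, not part of the original header): HTC
 * typically fills this structure and hands it to hif_post_init()
 * (declared further below). The handler names and "htc_ctx" are
 * placeholders.
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = htc_ctx,
 *		.txCompletionHandler = my_tx_done,
 *		.rxCompletionHandler = my_rx_done,
 *		.txResourceAvailHandler = my_tx_resource_avail,
 *		.fwEventHandler = my_fw_event,
 *	};
 *
 *	hif_post_init(hif_ctx, htc_ctx, &cbs);
 */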
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0, /* target connected */
	TARGET_STATUS_RESET, /* target got reset */
	TARGET_STATUS_EJECT, /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};

/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
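
/*
 * Usage note (illustrative sketch, not part of the original header): these
 * macros OR bit-fields into the data_attr word that is later passed to
 * hif_send_head() (declared further below). The field values shown are
 * arbitrary placeholders.
 *
 *	uint32_t data_attr = 0;
 *
 *	HIF_DATA_ATTR_SET_TX_CLASSIFY(data_attr, 1);
 *	HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(data_attr, 0);
 *	HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(data_attr, 1);
 */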
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};

#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */

struct hif_bus_id;
void hif_claim_device(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
			       int opcode, void *config, uint32_t config_len);
void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
		   struct hif_msg_callbacks *callbacks);
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
void hif_stop(struct hif_opaque_softc *hif_ctx);
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start);
QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			 uint32_t transferID, uint32_t nbytes,
			 qdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			     int force);
void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
			  uint8_t *DLPipe);
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled);
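
/*
 * Usage note (illustrative sketch, not part of the original header):
 * resolving the copy-engine pipes for a service before sending, assuming
 * a zero return from hif_map_service_to_pipe() indicates success.
 * "svc_id", "transfer_id", "nbuf" and "data_attr" are placeholders
 * supplied by the caller.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (!hif_map_service_to_pipe(hif_ctx, svc_id, &ul_pipe, &dl_pipe,
 *				     &ul_polled, &dl_polled))
 *		hif_send_head(hif_ctx, ul_pipe, transfer_id,
 *			      qdf_nbuf_len(nbuf), nbuf, data_attr);
 */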
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
			  bool wait_for_it);
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - Register the offld flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: Flush callback, either ol_flush in case of rx_thread,
 *                       or the GRO/LRO flush when RxThread is not enabled.
 *                       Called with the corresponding context for flush.
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *ol_ctx));

/**
 * hif_offld_flush_cb_deregister() - Deregister the offld flush callback
 * @scn: HIF opaque context
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_exec_should_yield() - Check if hif napi context should yield
 * @hif_ctx: HIF opaque context
 * @grp_id: grp_id of the napi for which check needs to be done
 *
 * The function uses grp_id to look for NAPI and checks if NAPI needs to
 * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
 * yield decision.
 *
 * Return: true if NAPI needs to yield, else false
 */
bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
#else
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
#endif
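
/*
 * Usage note (illustrative sketch, not part of the original header): a
 * group handler polling its rings might bail out early when the yield
 * check fires. "process_one_entry()", "budget" and "grp_id" are
 * placeholders.
 *
 *	while (work_done < budget) {
 *		work_done += process_one_entry();
 *		if (hif_exec_should_yield(hif_ctx, grp_id))
 *			break;
 *	}
 */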
void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint);

/**
 * hif_open() - Create hif handle
 * @qdf_ctx: qdf context
 * @mode: Driver Mode
 * @bus_type: Bus Type
 * @cbk: CDS Callbacks
 * @psoc: psoc object manager
 *
 * API to open HIF Context
 *
 * Return: HIF Opaque Pointer
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc);

void hif_close(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev, const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type);
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
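
/*
 * Usage note (illustrative sketch, not part of the original header): a
 * typical bring-up/tear-down ordering of the HIF context. "dev", "bdev",
 * "bid", "mode", "cbks" and "psoc" are placeholders supplied by the
 * caller; QDF_BUS_TYPE_PCI is used as an example bus type.
 *
 *	struct hif_opaque_softc *hif_ctx;
 *
 *	hif_ctx = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbks, psoc);
 *	if (!hif_ctx)
 *		return -ENOMEM;
 *	if (hif_enable(hif_ctx, dev, bdev, bid, QDF_BUS_TYPE_PCI,
 *		       HIF_ENABLE_TYPE_PROBE) != QDF_STATUS_SUCCESS) {
 *		hif_close(hif_ctx);
 *		return -EIO;
 *	}
 *	...
 *	hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
 *	hif_close(hif_ctx);
 */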
#ifdef CE_TASKLET_DEBUG_ENABLE
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
				 uint8_t value);
#endif
void hif_display_stats(struct hif_opaque_softc *hif_ctx);
void hif_clear_stats(struct hif_opaque_softc *hif_ctx);

#ifdef FEATURE_RUNTIME_PM
struct hif_pm_runtime_lock;
void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
					   struct hif_pm_runtime_lock *lock,
					   unsigned int delay);
bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
					  int val);
void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx);
#else
struct hif_pm_runtime_lock {
	const char *name;
};

static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}

static inline int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline int
hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline int
hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{}

static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline void
hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {}

static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
					const char *name)
{ return 0; }

static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock) {}

static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
						 struct hif_pm_runtime_lock *lock)
{ return 0; }

static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
					       struct hif_pm_runtime_lock *lock)
{ return 0; }

static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
				       struct hif_pm_runtime_lock *lock,
				       unsigned int delay)
{ return 0; }

static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
{ return false; }

static inline int
hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline void
hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
{ }

static inline void
hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {}

static inline int
hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline qdf_time_t
hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
{ return 0; }
#endif
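
/*
 * Usage note (illustrative sketch, not part of the original header):
 * keeping the bus awake around a hardware access with the runtime-PM
 * get/put pair. This assumes a zero return from hif_pm_runtime_get()
 * means the link is already resumed.
 *
 *	if (hif_pm_runtime_get(hif_ctx) == 0) {
 *		... access hardware while the bus stays awake ...
 *		hif_pm_runtime_put(hif_ctx);
 *	}
 */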
void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
				 bool is_packet_log_enabled);
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);

void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);

#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API returns the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API returns the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
		return true;
	else
		return false;
}
#endif
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);

#ifdef FEATURE_RUNTIME_PM
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx,
			   enum hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len,
			  uint32_t sendhead);
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			   uint32_t transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
		  uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
						  struct hif_pipe_addl_info *hif_info,
						  uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
				 uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
			 int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};

typedef uint32_t (*ext_intr_handler)(void *, uint32_t);

/**
 * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
 * @softc: hif opaque context owning the exec context
 * @id: the id of the interrupt context
 *
 * Return: IRQ number of the first (zero'th) IRQ within the interrupt context
 *         ID 'id' registered with the OS
 */
int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
				uint8_t id);

uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
				uint32_t numirq, uint32_t irq[],
				ext_intr_handler handler,
				void *cb_ctx, const char *context_name,
				enum hif_exec_type type, uint32_t scale);
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
			       const char *context_name);
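
/*
 * Usage note (illustrative sketch, not part of the original header):
 * registering a NAPI-style external interrupt group. The handler
 * "dp_srng_handler", its context "dp_ctx", the group name and the irq
 * array contents are placeholders owned by the caller.
 *
 *	uint32_t irqs[] = { reo2host_destination_ring1 };
 *
 *	hif_register_ext_group(hif_ctx, QDF_ARRAY_SIZE(irqs), irqs,
 *			       dp_srng_handler, dp_ctx, "dp_rx_grp",
 *			       HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE);
 */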
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks);

/**
 * hif_print_napi_stats() - Display HIF NAPI stats
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

/**
 * hif_clear_napi_stats() - Clear the NAPI latency stats
 * @hif_ctx: the HIF context whose NAPI latency stats are cleared
 *
 * Return: None
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
#ifdef __cplusplus
}
#endif
#ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - Function to wake from power collapse
 * @handle: HIF opaque handle
 *
 * Description: API to check if the device is awake or not before
 * read/write to BAR + 4K registers. If the device is awake, return
 * success; otherwise write '1' to
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, which interrupts
 * the device and wakes up the PCI and MHI within 50 ms.
 * The device then writes a value to
 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
 * handshake process and let the host know the device is awake.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_request(struct hif_opaque_softc *handle);

/**
 * hif_force_wake_release() - API to release/reset the SOC wake register
 * from interrupting the device.
 * @handle: HIF opaque handle
 *
 * Description: API to set the
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
 * to release the interrupt line.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_release(struct hif_opaque_softc *handle);
#else
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
#endif /* FORCE_WAKE */
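
/*
 * Usage note (illustrative sketch, not part of the original header):
 * guarding an access to a register above BAR + 4K with the force-wake
 * handshake. "read_my_reg()" is a placeholder for the actual register
 * access.
 *
 *	if (hif_force_wake_request(hif_ctx))
 *		return -EAGAIN;
 *	val = read_my_reg(hif_ctx);
 *	if (hif_force_wake_release(hif_ctx))
 *		return -EIO;
 */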
#ifdef FEATURE_HAL_DELAYED_REG_WRITE
/**
 * hif_prevent_link_low_power_states() - Prevent from going to low power states
 * @hif: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);

/**
 * hif_allow_link_low_power_states() - Allow link to go to low power states
 * @hif: HIF opaque context
 *
 * Return: None
 */
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
#else
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
#endif

void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv);
/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
ssize_t hif_dump_desc_trace_buf(struct device *dev,
				struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
				       const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
			    const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores CE service max yield time in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores CE service max rx ind flush in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush);
#ifdef OL_ATH_SMART_LOGGING
/*
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handler
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */
/*
 * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
 * to hif_opaque_softc handle
 * @hif_handle: hif_softc type
 *
 * Return: hif_opaque_softc type
 */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	return (struct hif_opaque_softc *)hif_handle;
}

#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase(): Indicate srng initialization phase
 * to avoid force wake as UMAC power collapse is not yet
 * enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return: None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase);
#else
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
#endif /* FORCE_WAKE */
#endif /* _HIF_H_ */