
/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _HIF_H_
#define _HIF_H_

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Header files */
#include <qdf_status.h>
#include "qdf_nbuf.h"
#include "qdf_lro.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
#include <linux/pci.h>
#endif /* HIF_PCI */
#ifdef HIF_USB
#include <linux/usb.h>
#endif /* HIF_USB */
#ifdef IPA_OFFLOAD
#include <linux/ipa.h>
#endif
#include "cfg_ucfg_api.h"

#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1

typedef void __iomem *A_target_id_t;
typedef void *hif_handle_t;

#define HIF_TYPE_AR6002 2
#define HIF_TYPE_AR6003 3
#define HIF_TYPE_AR6004 5
#define HIF_TYPE_AR9888 6
#define HIF_TYPE_AR6320 7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_IPQ4019 13
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16
#define HIF_TYPE_QCN7605 17
#define HIF_TYPE_QCA6390 18
#define HIF_TYPE_QCA8074V2 19
#define HIF_TYPE_QCA6018 20
#define HIF_TYPE_QCN9000 21
#define HIF_TYPE_QCA6490 22
#define HIF_TYPE_QCA6750 23

#ifdef IPA_OFFLOAD
#define DMA_COHERENT_MASK_IPA_VER_3_AND_ABOVE 37
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32
#endif
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
struct CE_state;

#define CE_COUNT_MAX 12
#define HIF_MAX_GRP_IRQ 16

#ifndef HIF_MAX_GROUP
#define HIF_MAX_GROUP 7
#endif

#ifndef NAPI_YIELD_BUDGET_BASED
#ifdef HIF_CONFIG_SLUB_DEBUG_ON
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 3
#else
#ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
#endif
#endif /* SLUB_DEBUG_ON */
#else /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */

#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE \
	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)

#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)

/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4
/**
 * struct qca_napi_stat - stats structure for execution contexts
 * @napi_schedules: number of times the schedule function is called
 * @napi_polls: number of times the execution context runs
 * @napi_completes: number of times that the generating interrupt is reenabled
 * @napi_workdone: cumulative of all work done reported by handler
 * @cpu_corrected: incremented when execution context runs on a different core
 *	than the one that its irq is affined to.
 * @napi_budget_uses: histogram of work done per execution run
 * @time_limit_reached: count of yields due to time limit thresholds
 * @rxpkt_thresh_reached: count of yields due to a work limit
 * @napi_max_poll_time: maximum single poll time observed
 * @poll_time_buckets: histogram of poll times for the napi
 *
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
/**
 * per NAPI instance data structure
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device netdev; /* dummy net_dev */
	void *hif_ctx;
	struct napi_struct napi;
	uint8_t scale; /* currently same on all instances */
	uint8_t id;
	uint8_t cpu;
	int irq;
	cpumask_t cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct rx_thread_napi;
	struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t lro_ctx;
};
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: up/down/uninitialized state of this entry
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all core of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at
 *	same for all cpus of the same core.
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int core_id;
	int cluster_id;
	cpumask_t core_mask;
	cpumask_t thread_mask;
	unsigned int max_freq;
	uint32_t napis;
	uint32_t execs;
	int cluster_nxt; /* index, not pointer */
};
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head:
 * @bigcl_head:
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 */
struct qca_napi_data {
	struct hif_softc *hif_softc;
	qdf_spinlock_t lock;
	uint32_t state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t ce_map;
	uint32_t exec_map;
	uint32_t user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu napi_cpu[NR_CPUS];
	int lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t flags;
};
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM
 * @runtime_pm_delay: Runtime PM Delay
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};

/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};

struct hif_opaque_softc {
};
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
};
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY

/* HIF_EVENT_HIST_MAX should always be power of 2 */
#define HIF_EVENT_HIST_MAX 512
#define HIF_NUM_INT_CONTEXTS HIF_MAX_GROUP
#define HIF_EVENT_HIST_DISABLE_MASK 0

/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};

/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event
 * @event: event entry
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
	qdf_atomic_t index;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
/**
 * hif_hist_record_event() - Record one datapath event in history
 * @hif_ctx: HIF opaque context
 * @event: DP event entry
 * @intr_grp_id: interrupt group ID registered with hif
 *
 * Return: None
 */
void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event,
			   uint8_t intr_grp_id);
/**
 * hif_record_event() - Wrapper function to form and record DP event
 * @hif_ctx: HIF opaque context
 * @intr_grp_id: interrupt group ID registered with hif
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer index of the srng
 * @tp: tail pointer index of the srng
 * @type: type of the event to be logged in history
 *
 * Return: None
 */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
	struct hif_event_record event;

	event.hal_ring_id = hal_ring_id;
	event.hp = hp;
	event.tp = tp;
	event.type = type;

	return hif_hist_record_event(hif_ctx, &event, intr_grp_id);
}
#else
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
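
/*
 * Example (illustrative sketch, not part of the original header): a datapath
 * interrupt handler could record ring state around its SRNG access window.
 * The grp_id, ring id and hp/tp values below are placeholders supplied by
 * the caller.
 *
 *	hif_record_event(hif_ctx, intr_grp_id, hal_ring_id, hp, tp,
 *			 HIF_EVENT_SRNG_ACCESS_START);
 *	... process ring entries ...
 *	hif_record_event(hif_ctx, intr_grp_id, hal_ring_id, hp, tp,
 *			 HIF_EVENT_SRNG_ACCESS_END);
 */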
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR
 *                        platform-specific measures to completely power-off
 *                        the module and associated hardware (i.e. cut power
 *                        supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get block address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		   uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the dsr handler
 *           note: the rw completion handler is provided the context
 *           passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler: DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS (*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS (*dsr_handler)(void *context);
};
/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if target is ready
 *
 * This Structure provides callback pointer for HIF to query hdd for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
};

/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
/****************************************************************/
/* BMI and Diag window abstraction                              */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
				   * handled atomically by
				   * DiagRead/DiagWrite
				   */

#ifdef WLAN_FEATURE_BMI
/*
 * API to handle HIF-specific BMI message exchanges, this API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
#else /* WLAN_FEATURE_BMI */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
#endif /* WLAN_FEATURE_BMI */
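
/*
 * Example (illustrative sketch; the buffers, DMA addresses and lengths below
 * are placeholders the caller must have prepared): a BMI command/response
 * exchange is only attempted when the target actually needs BMI, and the
 * call blocks until the response arrives or the timeout expires.
 *
 *	if (hif_needs_bmi(hif_ctx))
 *		status = hif_exchange_bmi_msg(hif_ctx, cmd_paddr, rsp_paddr,
 *					      send_buf, send_len,
 *					      resp_buf, &resp_len,
 *					      HIF_BMI_EXCHANGE_NO_TIMEOUT);
 */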
/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
			     uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
			    void *ramdump_base, uint32_t address, uint32_t size);

/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			      uint32_t address, uint8_t *data, int nbytes);
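
/*
 * Example (illustrative sketch; 'address' is a placeholder target address):
 * diag accesses block and the register variants are 4-byte aligned, so a
 * read-modify-write of a target word from a sleepable context looks like:
 *
 *	uint32_t val;
 *
 *	if (hif_diag_read_access(hif_ctx, address, &val) == QDF_STATUS_SUCCESS)
 *		hif_diag_write_access(hif_ctx, address, val | 0x1);
 */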
typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
				fastpath_msg_handler handler, void *context);
#else
static inline int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
					      fastpath_msg_handler handler,
					      void *context)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
#endif
/*
 * Enable/disable CDC max performance workaround
 * For max performance set this to 0
 * To allow SoC to enter sleep set this to 1
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0

void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);
/**
 * @brief List of callbacks - filled in by HTC.
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint32_t transferID,
					  uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};
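
/*
 * Example (illustrative sketch): HTC would typically fill this structure with
 * its own handlers and hand it to hif_post_init(), declared further below.
 * The handler names and htc_ctx here are placeholders, not real symbols.
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = htc_ctx,
 *		.txCompletionHandler = htc_tx_completion,
 *		.rxCompletionHandler = htc_rx_completion,
 *	};
 *
 *	hif_post_init(hif_ctx, htc_ctx, &cbs);
 */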
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0, /* target connected */
	TARGET_STATUS_RESET, /* target got reset */
	TARGET_STATUS_EJECT, /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};

/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};
#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
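
/*
 * Example (illustrative): each macro above ORs a bit-field into a data_attr
 * word that is later handed to the send path, e.g. as the data_attr argument
 * of hif_send_head(). pkt_offset below is a placeholder value.
 *
 *	uint32_t data_attr = 0;
 *
 *	HIF_DATA_ATTR_SET_TX_CLASSIFY(data_attr, 1);
 *	HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(data_attr, pkt_offset);
 */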
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index; /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index; /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};

#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */
struct hif_bus_id;

void hif_claim_device(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
			       int opcode, void *config, uint32_t config_len);
void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
		   struct hif_msg_callbacks *callbacks);
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
void hif_stop(struct hif_opaque_softc *hif_ctx);
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start);
QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			 uint32_t transferID, uint32_t nbytes,
			 qdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			     int force);
void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
			  uint8_t *DLPipe);
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled);
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
			  bool wait_for_it);
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif

void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - Register the offld flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: Flush callback: ol_flush when the rx thread is
 *                       enabled, or the GRO/LRO flush otherwise. Called
 *                       with the corresponding context for flush.
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *ol_ctx));

/**
 * hif_offld_flush_cb_deregister() - Deregister the offld flush callback
 * @scn: HIF opaque context
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_exec_should_yield() - Check if hif napi context should yield
 * @hif_ctx: HIF opaque context
 * @grp_id: grp_id of the napi for which check needs to be done
 *
 * The function uses grp_id to look for NAPI and checks if NAPI needs to
 * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
 * yield decision.
 *
 * Return: true if NAPI needs to yield, else false
 */
bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
#else
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
#endif

void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint);
/**
 * hif_open() - Create hif handle
 * @qdf_ctx: qdf context
 * @mode: Driver Mode
 * @bus_type: Bus Type
 * @cbk: CDS Callbacks
 * @psoc: psoc object manager
 *
 * API to open HIF Context
 *
 * Return: HIF Opaque Pointer
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc);

void hif_close(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev, const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type);
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
void hif_display_stats(struct hif_opaque_softc *hif_ctx);
void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
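
/*
 * Example (illustrative bring-up/teardown order, not a definitive sequence):
 * the qdf device, bus type, device pointers and callback structure are
 * placeholders supplied by the platform glue.
 *
 *	hif_ctx = hif_open(qdf_dev, mode, bus_type, &cbk, psoc);
 *	if (hif_ctx)
 *		status = hif_enable(hif_ctx, dev, bdev, bid, bus_type,
 *				    HIF_ENABLE_TYPE_PROBE);
 *	...
 *	hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
 *	hif_close(hif_ctx);
 */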
#ifdef FEATURE_RUNTIME_PM
struct hif_pm_runtime_lock;

void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
					   struct hif_pm_runtime_lock *lock,
					   unsigned int delay);
bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
					  int val);
void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
#else
struct hif_pm_runtime_lock {
	const char *name;
};

static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
static inline int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int
hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int
hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
{}
static inline int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline void
hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {}
static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
					const char *name)
{ return 0; }
static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock) {}
static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
						 struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
					       struct hif_pm_runtime_lock *lock)
{ return 0; }
static inline int
hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
				       struct hif_pm_runtime_lock *lock,
				       unsigned int delay)
{ return 0; }
static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
{ return false; }
static inline int
hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline void
hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
{ return; }
static inline void
hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {}
static inline int
hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{ return 0; }
static inline qdf_time_t
hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
{ return 0; }
#endif
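
/*
 * Example (illustrative sketch): a caller that must keep the bus powered
 * around a device access can bracket it with the runtime PM get/put helpers.
 * A zero return from the get is assumed to mean the device is usable, which
 * matches the no-op stubs above when FEATURE_RUNTIME_PM is disabled.
 *
 *	if (hif_pm_runtime_get(hif_ctx) == 0) {
 *		... access device registers ...
 *		hif_pm_runtime_put(hif_ctx);
 *	}
 */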
void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
				 bool is_packet_log_enabled);
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);

void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API returns the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API returns the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
		return true;
	else
		return false;
}
#endif
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
#ifdef FEATURE_RUNTIME_PM
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len,
			  uint32_t sendhead);
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			   uint32_t transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
		  uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
						  struct hif_pipe_addl_info *hif_info,
						  uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
				 uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
			 int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};

typedef uint32_t (*ext_intr_handler)(void *, uint32_t);

/**
 * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
 * @softc: hif opaque context owning the exec context
 * @id: the id of the interrupt context
 *
 * Return: IRQ number of the first (zero'th) IRQ within the interrupt context
 *         ID 'id' registered with the OS
 */
int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
				uint8_t id);

uint32_t hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
uint32_t hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
				uint32_t numirq, uint32_t irq[],
				ext_intr_handler handler,
				void *cb_ctx, const char *context_name,
				enum hif_exec_type type, uint32_t scale);
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
			       const char *context_name);
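
/*
 * Example (illustrative sketch): a datapath client registers a group of IRQs
 * with a handler matching the ext_intr_handler typedef and then asks HIF to
 * configure the group interrupts. The irq array, group context, handler name
 * and group name below are placeholders.
 *
 *	static uint32_t dp_ext_grp_handler(void *cb_ctx, uint32_t budget)
 *	{
 *		... service the group's rings, return work done ...
 *	}
 *
 *	hif_register_ext_group(hif_ctx, num_irq, irqs, dp_ext_grp_handler,
 *			       grp_ctx, "dp_ext_group", HIF_EXEC_NAPI_TYPE,
 *			       QCA_NAPI_DEF_SCALE);
 *	hif_configure_ext_group_interrupts(hif_ctx);
 */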
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks);

/**
 * hif_print_napi_stats() - Display HIF NAPI stats
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

/**
 * hif_clear_napi_stats() - Clear the accumulated NAPI latency stats
 * @hif_ctx: HIF opaque context whose NAPI stats are cleared
 *
 * Return: None
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
#ifdef __cplusplus
}
#endif

#ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - Function to wake from power collapse
 * @handle: HIF opaque handle
 *
 * Description: API to check if the device is awake or not before
 * read/write to BAR + 4K registers. If the device is awake, return
 * success; otherwise write '1' to
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, which interrupts
 * the device and wakes up the PCIe and MHI within 50ms,
 * and then the device writes a value to
 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
 * handshake process to let the host know the device is awake.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_request(struct hif_opaque_softc *handle);

/**
 * hif_force_wake_release() - API to release/reset the SOC wake register
 * from interrupting the device.
 * @handle: HIF opaque handle
 *
 * Description: API to set the
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
 * to release the interrupt line.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_release(struct hif_opaque_softc *handle);
#else
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
#endif /* FORCE_WAKE */
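
/*
 * Example (illustrative sketch): callers touching registers beyond BAR + 4K
 * bracket the access with a force-wake request/release pair. With FORCE_WAKE
 * disabled both calls reduce to the no-op stubs above.
 *
 *	if (hif_force_wake_request(hif_ctx) == 0) {
 *		... read/write the protected registers ...
 *		hif_force_wake_release(hif_ctx);
 *	}
 */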
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv);

/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
ssize_t hif_dump_desc_trace_buf(struct device *dev,
				struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
				       const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
			    const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores CE service max yield time in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores CE service max rx ind flush in hif context based
 * on ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush);
#ifdef OL_ATH_SMART_LOGGING
/*
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handle
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and buffers pointed by them in to the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
			 uint8_t *buf_init, uint32_t buf_sz,
			 uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */

/*
 * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
 * to hif_opaque_softc handle
 * @hif_handle: hif_softc type
 *
 * Return: hif_opaque_softc type
 */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	return (struct hif_opaque_softc *)hif_handle;
}
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase(): Indicate srng initialization phase
 * to avoid force wake as UMAC power collapse is not yet
 * enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return: None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase);
#else
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
#endif /* FORCE_WAKE */
#endif /* _HIF_H_ */