/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _HIF_H_
#define _HIF_H_

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/* Header files */
#include <qdf_status.h>
#include "qdf_nbuf.h"
#include "qdf_lro.h"
#include "ol_if_athvar.h"
#include <linux/platform_device.h>
#ifdef HIF_PCI
#include <linux/pci.h>
#endif /* HIF_PCI */
#ifdef HIF_USB
#include <linux/usb.h>
#endif /* HIF_USB */
#ifdef IPA_OFFLOAD
#include <linux/ipa.h>
#endif
#include "cfg_ucfg_api.h"
#include "qdf_dev.h"

#define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
typedef void __iomem *A_target_id_t;
typedef void *hif_handle_t;

#define HIF_TYPE_AR6002 2
#define HIF_TYPE_AR6003 3
#define HIF_TYPE_AR6004 5
#define HIF_TYPE_AR9888 6
#define HIF_TYPE_AR6320 7
#define HIF_TYPE_AR6320V2 8
/* For attaching Peregrine 2.0 board host_reg_tbl only */
#define HIF_TYPE_AR9888V2 9
#define HIF_TYPE_ADRASTEA 10
#define HIF_TYPE_AR900B 11
#define HIF_TYPE_QCA9984 12
#define HIF_TYPE_IPQ4019 13
#define HIF_TYPE_QCA9888 14
#define HIF_TYPE_QCA8074 15
#define HIF_TYPE_QCA6290 16
#define HIF_TYPE_QCN7605 17
#define HIF_TYPE_QCA6390 18
#define HIF_TYPE_QCA8074V2 19
#define HIF_TYPE_QCA6018 20
#define HIF_TYPE_QCN9000 21
#define HIF_TYPE_QCA6490 22
#define HIF_TYPE_QCA6750 23
#define HIF_TYPE_QCA5018 24
#define HIF_TYPE_QCN6122 25

#define DMA_COHERENT_MASK_DEFAULT 37

#ifdef IPA_OFFLOAD
#define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32
#endif
/* enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};
struct CE_state;

#define CE_COUNT_MAX 12
#define HIF_MAX_GRP_IRQ 16

#ifndef HIF_MAX_GROUP
#define HIF_MAX_GROUP 7
#endif

#ifndef NAPI_YIELD_BUDGET_BASED
#ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
#endif
#else /* NAPI_YIELD_BUDGET_BASED */
#define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
#endif /* NAPI_YIELD_BUDGET_BASED */

#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE \
	(1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
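/*
 * Worked example (informational note, not from the upstream header): with the
 * default QCA_NAPI_BUDGET of 64 and QCA_NAPI_DEF_SCALE_BIN_SHIFT of 4
 * (scale = 1 << 4 = 16), HIF_NAPI_MAX_RECEIVES evaluates to 64 * 16 = 1024.
 * When NAPI_YIELD_BUDGET_BASED is defined, the shift is 2 and the limit
 * becomes 64 * 4 = 256.
 */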
/* NOTE: "napi->scale" can be changed,
 * but this does not change the number of buckets
 */
#define QCA_NAPI_NUM_BUCKETS 4

/**
 * qca_napi_stat - stats structure for execution contexts
 * @napi_schedules - number of times the schedule function is called
 * @napi_polls - number of times the execution context runs
 * @napi_completes - number of times that the generating interrupt is reenabled
 * @napi_workdone - cumulative of all work done reported by handler
 * @cpu_corrected - incremented when execution context runs on a different core
 *                  than the one that its irq is affined to.
 * @napi_budget_uses - histogram of work done per execution run
 * @time_limit_reached - count of yields due to time limit thresholds
 * @rxpkt_thresh_reached - count of yields due to a work limit
 * @poll_time_buckets - histogram of poll times for the napi
 *
 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
/**
 * struct qca_napi_info - per NAPI instance data structure
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device netdev; /* dummy net_dev */
	void *hif_ctx;
	struct napi_struct napi;
	uint8_t scale;   /* currently same on all instances */
	uint8_t id;
	uint8_t cpu;
	int irq;
	cpumask_t cpumask;
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct rx_thread_napi;
	struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t lro_ctx;
};

enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};

enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP
};
/**
 * struct qca_napi_cpu - an entry of the napi cpu table
 * @state: online state of the cpu (up/down/uninitialized)
 * @core_id: physical core id of the core
 * @cluster_id: cluster this core belongs to
 * @core_mask: mask to match all cores of this cluster
 * @thread_mask: mask for this core within the cluster
 * @max_freq: maximum clock this core can be clocked at;
 *            same for all cpus of the same core.
 * @napis: bitmap of napi instances on this core
 * @execs: bitmap of execution contexts on this core
 * @cluster_nxt: chain to link cores within the same cluster
 *
 * This structure represents a single entry in the napi cpu
 * table. The table is part of struct qca_napi_data.
 * This table is initialized by the init function, called while
 * the first napi instance is being created, updated by hotplug
 * notifier and when cpu affinity decisions are made (by throughput
 * detection), and deleted when the last napi instance is removed.
 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int core_id;
	int cluster_id;
	cpumask_t core_mask;
	cpumask_t thread_mask;
	unsigned int max_freq;
	uint32_t napis;
	uint32_t execs;
	int cluster_nxt;  /* index, not pointer */
};
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: array of napi instance info, indexed by pipe_id
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: head index of the little-cluster core list
 * @bigcl_head: head index of the big-cluster core list
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 */
struct qca_napi_data {
	struct hif_softc *hif_softc;
	qdf_spinlock_t lock;
	uint32_t state;
	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t ce_map;
	uint32_t exec_map;
	uint32_t user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX];
	struct qca_napi_cpu napi_cpu[NR_CPUS];
	int lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t flags;
};
/**
 * struct hif_config_info - Place Holder for HIF configuration
 * @enable_self_recovery: Self Recovery
 * @enable_runtime_pm: Enable Runtime PM
 * @runtime_pm_delay: Runtime PM Delay
 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
 *
 * Structure for holding HIF ini parameters.
 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
};

/**
 * struct hif_target_info - Target Information
 * @target_version: Target Version
 * @target_type: Target Type
 * @target_revision: Target Revision
 * @soc_version: SOC Version
 * @hw_name: pointer to hardware name
 *
 * Structure to hold target information.
 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name;
};

struct hif_opaque_softc {
};
/**
 * enum hif_event_type - Type of DP events to be recorded
 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	/* Do check hif_hist_skip_event_record when adding new events */
};

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/* HIF_EVENT_HIST_MAX should always be power of 2 */
#define HIF_EVENT_HIST_MAX 512
#define HIF_NUM_INT_CONTEXTS HIF_MAX_GROUP
#define HIF_EVENT_HIST_DISABLE_MASK 0

/**
 * struct hif_event_record - an entry of the DP event history
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer of the ring (may not be applicable for all events)
 * @tp: tail pointer of the ring (may not be applicable for all events)
 * @cpu_id: cpu id on which the event occurred
 * @timestamp: timestamp when event occurred
 * @type: type of the event
 *
 * This structure represents the information stored for every datapath
 * event which is logged in the history.
 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};

/**
 * struct hif_event_misc - history related misc info
 * @last_irq_index: last irq event index in history
 * @last_irq_ts: last irq timestamp
 */
struct hif_event_misc {
	int32_t last_irq_index;
	uint64_t last_irq_ts;
};

/**
 * struct hif_event_history - history for one interrupt group
 * @index: index to store new event
 * @misc: history related misc info
 * @event: event entry
 *
 * This structure represents the datapath history for one
 * interrupt group.
 */
struct hif_event_history {
	qdf_atomic_t index;
	struct hif_event_misc misc;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
/**
 * hif_hist_record_event() - Record one datapath event in history
 * @hif_ctx: HIF opaque context
 * @event: DP event entry
 * @intr_grp_id: interrupt group ID registered with hif
 *
 * Return: None
 */
void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event,
			   uint8_t intr_grp_id);

/**
 * hif_event_history_init() - Initialize SRNG event history buffers
 * @hif_ctx: HIF opaque context
 * @id: context group ID for which history is recorded
 *
 * Return: None
 */
void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);

/**
 * hif_event_history_deinit() - De-initialize SRNG event history buffers
 * @hif_ctx: HIF opaque context
 * @id: context group ID for which history is recorded
 *
 * Return: None
 */
void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);

/**
 * hif_record_event() - Wrapper function to form and record DP event
 * @hif_ctx: HIF opaque context
 * @intr_grp_id: interrupt group ID registered with hif
 * @hal_ring_id: ring id for which event is recorded
 * @hp: head pointer index of the srng
 * @tp: tail pointer index of the srng
 * @type: type of the event to be logged in history
 *
 * Return: None
 */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
	struct hif_event_record event;

	event.hal_ring_id = hal_ring_id;
	event.hp = hp;
	event.tp = tp;
	event.type = type;

	hif_hist_record_event(hif_ctx, &event, intr_grp_id);

	return;
}
#else
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}

static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}

static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
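/*
 * Illustrative usage sketch (not part of the upstream header): a datapath
 * caller bracketing an SRNG access with history events. The variables
 * grp_id, ring_id, hp and tp are assumed to come from the caller's own
 * interrupt-group and ring state.
 *
 *	hif_record_event(hif_ctx, grp_id, ring_id, hp, tp,
 *			 HIF_EVENT_SRNG_ACCESS_START);
 *	... process ring entries ...
 *	hif_record_event(hif_ctx, grp_id, ring_id, hp, tp,
 *			 HIF_EVENT_SRNG_ACCESS_END);
 */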
/**
 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
 *
 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module
 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
 *                         minimize power
 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR
 *                        platform-specific measures to completely power-off
 *                        the module and associated hardware (i.e. cut power
 *                        supplies)
 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};

/**
 * enum hif_enable_type: what triggered the enabling of hif
 *
 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};

/**
 * enum hif_disable_type: what triggered the disabling of hif
 *
 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
/**
 * enum hif_device_config_opcode: configure mode
 *
 * @HIF_DEVICE_POWER_STATE: device power state
 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
 * @HIF_DEVICE_GET_FIFO_ADDR: get block FIFO address
 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
 * @HIF_BMI_DONE: bmi done
 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};

#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
#endif

void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		   uint32_t value);
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);

#define HIF_MAX_DEVICES 1
/**
 * struct htc_callbacks - Structure for HTC Callbacks methods
 * @context: context to pass to the @dsr_handler
 *           note: @rw_compl_handler is provided the context
 *           passed to hif_read_write
 * @rw_compl_handler: Read / write completion handler
 * @dsr_handler: DSR Handler
 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};

/**
 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
 * @context: Private data context
 * @set_recovery_in_progress: To Set Driver state for recovery in progress
 * @is_recovery_in_progress: Query if driver state is recovery in progress
 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
 * @is_driver_unloading: Query if driver is unloading.
 * @is_target_ready: Query if the target is ready
 * @get_bandwidth_level: Query current bandwidth level for the driver
 * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
 * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
 *
 * This structure provides callback pointers for HIF to query HDD for driver
 * states.
 */
struct hif_driver_state_callbacks {
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
};
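/*
 * Illustrative sketch (not part of the upstream header): how a driver layer
 * might populate the state callbacks before handing them to hif_open().
 * The my_* names are hypothetical.
 *
 *	static bool my_is_recovering(void *ctx) { return false; }
 *	static bool my_is_unloading(void *ctx) { return false; }
 *
 *	struct hif_driver_state_callbacks cbk = {
 *		.context = my_driver_ctx,
 *		.is_recovery_in_progress = my_is_recovering,
 *		.is_driver_unloading = my_is_unloading,
 *	};
 */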
/* This API detaches the HTC layer from the HIF device */
void hif_detach_htc(struct hif_opaque_softc *hif_ctx);

/****************************************************************/
/* BMI and Diag window abstraction */
/****************************************************************/

#define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))

#define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
				   * handled atomically by
				   * DiagRead/DiagWrite
				   */

#ifdef WLAN_FEATURE_BMI
/*
 * API to handle HIF-specific BMI message exchanges; this API is synchronous
 * and only allowed to be called from a context that can block (sleep)
 */
QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
				qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
				uint8_t *pSendMessage, uint32_t Length,
				uint8_t *pResponseMessage,
				uint32_t *pResponseLength, uint32_t TimeoutMS);
void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
#else /* WLAN_FEATURE_BMI */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
}

static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	return false;
}
#endif /* WLAN_FEATURE_BMI */

/*
 * APIs to handle HIF specific diagnostic read accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep). They are not high performance APIs.
 *
 * hif_diag_read_access reads a 4 Byte aligned/length value from a
 * Target register or memory word.
 *
 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
				uint32_t address, uint32_t *data);
QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
			     uint8_t *data, int nbytes);
void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
			    void *ramdump_base, uint32_t address,
			    uint32_t size);

/*
 * APIs to handle HIF specific diagnostic write accesses. These APIs are
 * synchronous and only allowed to be called from a context that
 * can block (sleep).
 * They are not high performance APIs.
 *
 * hif_diag_write_access writes a 4 Byte aligned/length value to a
 * Target register or memory word.
 *
 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
 */
QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
				 uint32_t address, uint32_t data);
QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
			      uint32_t address, uint8_t *data, int nbytes);
typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);

void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);

/*
 * Set the FASTPATH_mode_on flag in sc, for use by data path
 */
#ifdef WLAN_FEATURE_FASTPATH
void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);

/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @hif_ctx: HIF opaque context
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context);
#else
static inline QDF_STATUS hif_ce_fastpath_cb_register(
		struct hif_opaque_softc *hif_ctx,
		fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}

static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
#endif
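/*
 * Illustrative sketch (assumption, not from the upstream header): registering
 * a fastpath message handler when WLAN_FEATURE_FASTPATH is enabled. The
 * my_* names are hypothetical.
 *
 *	static void my_fastpath_handler(void *ctx, qdf_nbuf_t *nbufs,
 *					uint32_t num_nbufs) { ... }
 *
 *	hif_enable_fastpath(hif_ctx);
 *	if (hif_ce_fastpath_cb_register(hif_ctx, my_fastpath_handler,
 *					my_ctx) != QDF_STATUS_SUCCESS)
 *		... fall back to the regular completion path ...
 */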
/*
 * Enable/disable CDC max performance workaround
 * For max performance set this to 0
 * To allow SoC to enter sleep set this to 1
 */
#define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0

void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_shared_mem_t **ce_sr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr);

/**
 * @brief List of callbacks - filled in by HTC.
 */
struct hif_msg_callbacks {
	void *Context;
	/**< context meaningful to HTC */
	QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint32_t transferID,
					  uint32_t toeplitz_hash_result);
	QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
					  uint8_t pipeID);
	void (*txResourceAvailHandler)(void *context, uint8_t pipe);
	void (*fwEventHandler)(void *context, QDF_STATUS status);
	void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
};

enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0,  /* target connected */
	TARGET_STATUS_RESET,          /* target got reset */
	TARGET_STATUS_EJECT,          /* target got ejected */
	TARGET_STATUS_SUSPEND         /* target got suspended */
};

/**
 * enum hif_attribute_flags: configure hif
 *
 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
 *                                + No pktlog CE
 */
enum hif_attribute_flags {
	HIF_LOWDESC_CE_CFG = 1,
	HIF_LOWDESC_CE_NO_PKTLOG_CFG
};

#define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
	(attr |= (v & 0x01) << 5)
#define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
	(attr |= (v & 0x03) << 6)
#define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 13)
#define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
	(attr |= (v & 0x01) << 14)
#define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
	(attr |= (v & 0x01) << 15)
#define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
	(attr |= (v & 0x0FFF) << 16)
#define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
	(attr |= (v & 0x01) << 30)
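/*
 * Illustrative sketch (not from the upstream header): composing a data_attr
 * word with the macros above and passing it to hif_send_head(). pipe_id,
 * transfer_id and nbuf are assumed to be provided by the caller.
 *
 *	uint32_t data_attr = 0;
 *
 *	HIF_DATA_ATTR_SET_TX_CLASSIFY(data_attr, 1);
 *	HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(data_attr, 1);
 *	HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(data_attr, 0);
 *	hif_send_head(hif_ctx, pipe_id, transfer_id,
 *		      qdf_nbuf_len(nbuf), nbuf, data_attr);
 */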
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index;    /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};

struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};

#ifdef CONFIG_SLUB_DEBUG_ON
#define MSG_FLUSH_NUM 16
#else /* PERF build */
#define MSG_FLUSH_NUM 32
#endif /* SLUB_DEBUG_ON */

struct hif_bus_id;

void hif_claim_device(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
			       int opcode, void *config, uint32_t config_len);
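/*
 * Illustrative sketch (assumption, not from the upstream header): querying a
 * configuration item with one of the hif_device_config_opcode values defined
 * above.
 *
 *	uint32_t block_size;
 *
 *	if (hif_get_config_item(hif_ctx, HIF_DEVICE_GET_BLOCK_SIZE,
 *				&block_size, sizeof(block_size)) !=
 *	    QDF_STATUS_SUCCESS)
 *		... handle error ...
 */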
void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
		   struct hif_msg_callbacks *callbacks);
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
void hif_stop(struct hif_opaque_softc *hif_ctx);
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start);
QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			 uint32_t transferID, uint32_t nbytes,
			 qdf_nbuf_t wbuf, uint32_t data_attr);
void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
			     int force);
void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
			  uint8_t *DLPipe);
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled);
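/*
 * Illustrative sketch (not from the upstream header): resolving the pipes for
 * a service id. The service id constant (e.g. HTC_CTRL_RSVD_SVC) comes from
 * the HTC headers and is only an assumed example here.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_ctx, svc_id, &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled))
 *		... service is not mapped on this target ...
 */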
uint16_t
hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
			  bool wait_for_it);
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
#ifndef HIF_PCI
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
#else
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
#endif

void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);

#ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - Register the offld flush callback
 * @scn: HIF opaque context
 * @offld_flush_handler: Flush callback, either ol_flush in case of rx_thread,
 *                       or the GRO/LRO flush when RxThread is not enabled.
 *                       Called with the corresponding context for flush.
 *
 * Return: None
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *ol_ctx));

/**
 * hif_offld_flush_cb_deregister() - Deregister the offld flush callback
 * @scn: HIF opaque context
 *
 * Return: None
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
#endif
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_exec_should_yield() - Check if hif napi context should yield
 * @hif_ctx - HIF opaque context
 * @grp_id - grp_id of the napi for which check needs to be done
 *
 * The function uses grp_id to look for NAPI and checks if NAPI needs to
 * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
 * yield decision.
 *
 * Return: true if NAPI needs to yield, else false
 */
bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
#else
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
#endif

void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint);

/**
 * hif_open() - Create hif handle
 * @qdf_ctx: qdf context
 * @mode: Driver Mode
 * @bus_type: Bus Type
 * @cbk: CDS Callbacks
 * @psoc: psoc object manager
 *
 * API to open HIF Context
 *
 * Return: HIF Opaque Pointer
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc);
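/*
 * Illustrative lifecycle sketch (assumption, not from the upstream header):
 * the usual open/enable/disable/close sequence as suggested by the
 * declarations in this file. Bus-specific arguments (bdev, bid) and the
 * mode value are placeholders.
 *
 *	struct hif_opaque_softc *hif_ctx;
 *
 *	hif_ctx = hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (!hif_ctx)
 *		... handle open failure ...
 *	if (hif_enable(hif_ctx, dev, bdev, bid, QDF_BUS_TYPE_PCI,
 *		       HIF_ENABLE_TYPE_PROBE) != QDF_STATUS_SUCCESS)
 *		... handle enable failure ...
 *	...
 *	hif_disable(hif_ctx, HIF_DISABLE_TYPE_REMOVE);
 *	hif_close(hif_ctx);
 */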
/**
 * hif_init_dma_mask() - Set dma mask for the dev
 * @dev: dev for which DMA mask is to be set
 * @bus_type: bus type for the target
 *
 * This API sets the DMA mask for the device before the datapath
 * memory pre-allocation is done. If the DMA mask is not set before
 * requesting the DMA memory, the kernel defaults to a 32-bit DMA mask,
 * and does not utilize the full device capability.
 *
 * Return: 0 - success, non-zero on failure.
 */
int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
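/*
 * Illustrative sketch (assumption): called from bus probe before any DMA
 * allocations, e.g. for a PCI target. The pdev name is hypothetical.
 *
 *	if (hif_init_dma_mask(&pdev->dev, QDF_BUS_TYPE_PCI))
 *		... fail the probe or fall back to default mask ...
 */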
void hif_close(struct hif_opaque_softc *hif_ctx);
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev, const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type);
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);

#ifdef CE_TASKLET_DEBUG_ENABLE
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
				 uint8_t value);
#endif

void hif_display_stats(struct hif_opaque_softc *hif_ctx);
void hif_clear_stats(struct hif_opaque_softc *hif_ctx);

/**
 * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
 * @HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
 * @HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
 * @HIF_PM_CE_WAKE: Wake irq is CE interrupt
 */
typedef enum {
	HIF_PM_INVALID_WAKE,
	HIF_PM_MSI_WAKE,
	HIF_PM_CE_WAKE,
} hif_pm_wake_irq_type;

/**
 * hif_pm_get_wake_irq_type - Get wake irq type for Power Management
 * @hif_ctx: HIF context
 *
 * Return: enum hif_pm_wake_irq_type
 */
hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
/**
 * enum wlan_rtpm_dbgid - runtime pm put/get debug id
 * @RTPM_ID_RESVERD: Reserved
 * @RTPM_ID_WMI: WMI sending msg, expect put happen at
 *               tx completion from CE level directly.
 * @RTPM_ID_HTC: pkt sending by HTT_DATA_MSG_SVC, expect
 *               put from fw response or just in
 *               htc_issue_packets
 * @RTPM_ID_QOS_NOTIFY: pm qos notifier
 * @RTPM_ID_DP_TX_DESC_ALLOC_FREE: tx desc alloc/free
 * @RTPM_ID_CE_SEND_FAST: operation in ce_send_fast, does not include
 *                        the pkt put that happens outside this function
 * @RTPM_ID_SUSPEND_RESUME: suspend/resume in hdd
 * @RTPM_ID_DW_TX_HW_ENQUEUE: operation in function dp_tx_hw_enqueue
 * @RTPM_ID_HAL_REO_CMD: HAL_REO_CMD operation
 * @RTPM_ID_DP_PRINT_RING_STATS: operation in dp_print_ring_stats
 */
/* New value added to the enum must also be reflected in function
 * rtpm_string_from_dbgid()
 */
typedef enum {
	RTPM_ID_RESVERD = 0,
	RTPM_ID_WMI = 1,
	RTPM_ID_HTC = 2,
	RTPM_ID_QOS_NOTIFY = 3,
	RTPM_ID_DP_TX_DESC_ALLOC_FREE = 4,
	RTPM_ID_CE_SEND_FAST = 5,
	RTPM_ID_SUSPEND_RESUME = 6,
	RTPM_ID_DW_TX_HW_ENQUEUE = 7,
	RTPM_ID_HAL_REO_CMD = 8,
	RTPM_ID_DP_PRINT_RING_STATS = 9,
	RTPM_ID_MAX,
} wlan_rtpm_dbgid;

/**
 * rtpm_string_from_dbgid() - Convert dbgid to respective string
 * @id - debug id
 *
 * Debug support function to convert dbgid to string.
 * Please note to add new string in the array at index equal to
 * its enum value in wlan_rtpm_dbgid.
 */
static inline char *rtpm_string_from_dbgid(wlan_rtpm_dbgid id)
{
	static const char *strings[] = { "RTPM_ID_RESVERD",
					 "RTPM_ID_WMI",
					 "RTPM_ID_HTC",
					 "RTPM_ID_QOS_NOTIFY",
					 "RTPM_ID_DP_TX_DESC_ALLOC_FREE",
					 "RTPM_ID_CE_SEND_FAST",
					 "RTPM_ID_SUSPEND_RESUME",
					 "RTPM_ID_DW_TX_HW_ENQUEUE",
					 "RTPM_ID_HAL_REO_CMD",
					 "RTPM_ID_DP_PRINT_RING_STATS",
					 "RTPM_ID_MAX"};

	return (char *)strings[id];
}

/**
 * enum hif_pm_link_state - hif link state
 * @HIF_PM_LINK_STATE_DOWN: hif link state is down
 * @HIF_PM_LINK_STATE_UP: hif link state is up
 */
enum hif_pm_link_state {
	HIF_PM_LINK_STATE_DOWN,
	HIF_PM_LINK_STATE_UP
};
#ifdef FEATURE_RUNTIME_PM
struct hif_pm_runtime_lock;

void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
			    wlan_rtpm_dbgid rtpm_dbgid);
int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
				    wlan_rtpm_dbgid rtpm_dbgid);
int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
		       wlan_rtpm_dbgid rtpm_dbgid,
		       bool is_critical_ctx);
void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
				 wlan_rtpm_dbgid rtpm_dbgid);
int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
		       wlan_rtpm_dbgid rtpm_dbgid);
int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
			      wlan_rtpm_dbgid rtpm_dbgid);
void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx);
int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			     struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
				   struct hif_pm_runtime_lock *lock);
int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
				 struct hif_pm_runtime_lock *lock);
bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
					  int val);
void hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx);
void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx);
qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx);
int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx);

/**
 * hif_pm_set_link_state() - set link state during RTPM
 * @hif_handle: HIF Context
 * @val: link state (enum hif_pm_link_state)
 *
 * Return: None
 */
void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val);

/**
 * hif_pm_get_link_state() - Is link state up
 * @hif_handle: HIF Context
 *
 * Return: 1 link is up, 0 link is down
 */
uint8_t hif_pm_get_link_state(struct hif_opaque_softc *hif_handle);
#else
struct hif_pm_runtime_lock {
	const char *name;
};

static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}

static inline int
hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
			wlan_rtpm_dbgid rtpm_dbgid)
{ return 0; }

static inline int
hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
				wlan_rtpm_dbgid rtpm_dbgid)
{ return 0; }

static inline int
hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline void
hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
			    wlan_rtpm_dbgid rtpm_dbgid)
{}

static inline int
hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid,
		   bool is_critical_ctx)
{ return 0; }

static inline int
hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx, wlan_rtpm_dbgid rtpm_dbgid)
{ return 0; }

static inline int
hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
			  wlan_rtpm_dbgid rtpm_dbgid)
{ return 0; }

static inline void
hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx) {};

static inline int hif_runtime_lock_init(qdf_runtime_lock_t *lock,
					const char *name)
{ return 0; }

static inline void
hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
			struct hif_pm_runtime_lock *lock) {}

static inline int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
						 struct hif_pm_runtime_lock *lock)
{ return 0; }

static inline int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
					       struct hif_pm_runtime_lock *lock)
{ return 0; }

static inline bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
{ return false; }

static inline void
hif_pm_runtime_suspend_lock(struct hif_opaque_softc *hif_ctx)
{ return; }

static inline void
hif_pm_runtime_suspend_unlock(struct hif_opaque_softc *hif_ctx)
{ return; }

static inline int
hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline void
hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx, int val)
{ return; }

static inline void
hif_pm_runtime_check_and_request_resume(struct hif_opaque_softc *hif_ctx)
{ return; }

static inline void
hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx) {};

static inline int
hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline qdf_time_t
hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
{ return 0; }

static inline
void hif_pm_set_link_state(struct hif_opaque_softc *hif_handle, uint8_t val)
{}
#endif
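/*
 * Illustrative sketch (assumption, not from the upstream header): the typical
 * get/put pattern around hardware access under runtime PM, using one of the
 * wlan_rtpm_dbgid values defined above. The zero-on-success return convention
 * is assumed here.
 *
 *	if (hif_pm_runtime_get(hif_ctx, RTPM_ID_DP_TX_DESC_ALLOC_FREE,
 *			       false) == 0) {
 *		... access hardware ...
 *		hif_pm_runtime_put(hif_ctx, RTPM_ID_DP_TX_DESC_ALLOC_FREE);
 *	}
 */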
void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
				 bool is_packet_log_enabled);
void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);

void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);

#ifdef IPA_OFFLOAD
/**
 * hif_get_ipa_hw_type() - get IPA hw type
 *
 * This API returns the IPA hw type.
 *
 * Return: IPA hw type
 */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}

/**
 * hif_get_ipa_present() - get IPA hw status
 *
 * This API returns the IPA hw status.
 *
 * Return: true if IPA is present or false otherwise
 */
static inline
bool hif_get_ipa_present(void)
{
	if (ipa_uc_reg_rdyCB(NULL) != -EPERM)
		return true;
	else
		return false;
}
#endif
int hif_bus_resume(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_early_suspend() - stop non wmi tx traffic
 * @hif_ctx: hif context
 */
int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);

/**
 * hif_bus_late_resume() - resume non wmi traffic
 * @hif_ctx: hif context
 */
int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
/**
 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_enable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * As opposed to the standard hif_irq_disable, this function always applies to
 * the APPS side kernel interrupt handling.
 *
 * Return: errno
 */
int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * This function always applies to the APPS side kernel interrupt handling
 * to wake the system from suspend.
 *
 * Return: errno
 */
int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);

/**
 * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
 * @hif_ctx: an opaque HIF handle to use
 *
 * This function always applies to the APPS side kernel interrupt handling
 * to disable the wake irq.
 *
 * Return: errno
 */
int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);

#ifdef FEATURE_RUNTIME_PM
int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx);
int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx);
void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx);
#endif
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
int hif_dump_registers(struct hif_opaque_softc *scn);
int ol_copy_ramdump(struct hif_opaque_softc *scn);
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
		     u32 *revision, const char **target_name);
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
						   scn);
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status);
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg);
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len,
			  uint32_t sendhead);
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			   uint32_t transfer_id, u_int32_t len);
int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
		  uint32_t transfer_id, uint32_t download_len);
void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
void hif_ce_war_disable(void);
void hif_ce_war_enable(void);
void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
		struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
				 uint32_t pipe_num);
int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
#endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */

void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
			 int rx_bundle_cnt);
int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);

void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);

void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);

enum hif_exec_type {
	HIF_EXEC_NAPI_TYPE,
	HIF_EXEC_TASKLET_TYPE,
};

typedef uint32_t (*ext_intr_handler)(void *, uint32_t);
/**
 * hif_get_int_ctx_irq_num() - retrieve an IRQ number for an interrupt context id
 * @softc: hif opaque context owning the exec context
 * @id: the id of the interrupt context
 *
 * Return: IRQ number of the first (zeroth) IRQ within the interrupt context ID
 *         'id' registered with the OS
 */
int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
                                uint8_t id);
/**
 * hif_configure_ext_group_interrupts() - Configure ext group interrupts
 * @hif_ctx: hif opaque context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
/**
 * hif_register_ext_group() - API to register an external group
 * interrupt handler.
 * @hif_ctx: HIF context
 * @numirq: number of IRQs in the group
 * @irq: array of IRQ values
 * @handler: callback interrupt handler function
 * @cb_ctx: context to be passed to the callback
 * @context_name: name of the interrupt context
 * @type: napi vs tasklet
 * @scale: budget scale factor for the context
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
                                  uint32_t numirq, uint32_t irq[],
                                  ext_intr_handler handler,
                                  void *cb_ctx, const char *context_name,
                                  enum hif_exec_type type, uint32_t scale);

void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
                               const char *context_name);
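
/*
 * Illustrative usage sketch (not part of the HIF API): registering a
 * single-IRQ NAPI-type execution group and tearing it down again by name.
 * The handler body, IRQ number, context name and scale value below are
 * hypothetical placeholders chosen for this example only.
 */
static inline uint32_t hif_example_grp_handler(void *cb_ctx, uint32_t budget)
{
        /* process up to 'budget' units of work for this context */
        return 0; /* report the amount of work actually done */
}

static inline QDF_STATUS
hif_example_register_rx_group(struct hif_opaque_softc *hif_ctx)
{
        uint32_t irq[1] = { 0 }; /* hypothetical IRQ number */
        QDF_STATUS status;

        status = hif_register_ext_group(hif_ctx, 1, irq,
                                        hif_example_grp_handler,
                                        NULL /* cb_ctx */, "example_rx_group",
                                        HIF_EXEC_NAPI_TYPE, 1 /* scale */);
        if (status != QDF_STATUS_SUCCESS)
                return status;

        /* ... later, on teardown, deregister by the same context name ... */
        hif_deregister_exec_group(hif_ctx, "example_rx_group");

        return QDF_STATUS_SUCCESS;
}
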
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
                              u_int8_t pipeid,
                              struct hif_msg_callbacks *callbacks);

/**
 * hif_print_napi_stats() - Display HIF NAPI stats
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);

/**
 * hif_clear_napi_stats() - Clear the HIF NAPI latency stats
 * @hif_ctx: HIF opaque context
 *
 * Return: None
 */
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
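
/*
 * Illustrative usage sketch (not part of the HIF API): a debug handler that
 * dumps the current NAPI latency statistics and then resets them so the
 * next dump covers a fresh measurement interval.
 */
static inline void
hif_example_dump_and_reset_napi_stats(struct hif_opaque_softc *hif_ctx)
{
        hif_print_napi_stats(hif_ctx);
        hif_clear_napi_stats(hif_ctx);
}
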
#ifdef __cplusplus
}
#endif
#ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - Function to wake from power collapse
 * @handle: HIF opaque handle
 *
 * Description: Checks whether the device is awake before a read/write of
 * the BAR + 4K registers. If the device is already awake, returns success;
 * otherwise writes '1' to
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG, which interrupts
 * the device and wakes up PCI and MHI within 50 ms. The device then
 * writes a value to
 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
 * handshake and let the host know the device is awake.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_request(struct hif_opaque_softc *handle);

/**
 * hif_force_wake_release() - API to release/reset the SOC wake register
 * so it no longer interrupts the device.
 * @handle: HIF opaque handle
 *
 * Description: Sets
 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
 * to release the interrupt line.
 *
 * Return: zero - success/non-zero - failure
 */
int hif_force_wake_release(struct hif_opaque_softc *handle);
#else
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
        return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
        return 0;
}
#endif /* FORCE_WAKE */
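
/*
 * Illustrative usage sketch (not part of the HIF API): the expected call
 * sequence around an access to the BAR + 4K registers that needs the
 * force-wake handshake documented above. The register access itself is
 * elided; only the bracketing calls are real HIF APIs.
 */
static inline int
hif_example_access_with_force_wake(struct hif_opaque_softc *hif_hdl)
{
        int ret = hif_force_wake_request(hif_hdl);

        if (ret)
                return ret;

        /* ... safely read/write the BAR + 4K registers here ... */

        return hif_force_wake_release(hif_hdl);
}
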
#ifdef FEATURE_HAL_DELAYED_REG_WRITE
/**
 * hif_prevent_link_low_power_states() - Prevent the link from going to low
 * power states
 * @hif: HIF opaque context
 *
 * Return: 0 on success. Error code on failure.
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);

/**
 * hif_allow_link_low_power_states() - Allow the link to go to low power states
 * @hif: HIF opaque context
 *
 * Return: None
 */
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
#else
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
        return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
#endif
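
/*
 * Illustrative usage sketch (not part of the HIF API): keeping the link out
 * of low power states for the duration of a burst of delayed register
 * writes. The write burst itself is elided.
 */
static inline int
hif_example_reg_write_burst(struct hif_opaque_softc *hif)
{
        int ret = hif_prevent_link_low_power_states(hif);

        if (ret)
                return ret;

        /* ... flush the pending delayed register writes here ... */

        hif_allow_link_low_power_states(hif);
        return 0;
}
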
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);

/**
 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
 * @hif_ctx: the HIF context to assign the callback to
 * @callback: the callback to assign
 * @priv: the private data to pass to the callback when invoked
 *
 * Return: None
 */
void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
                               void (*callback)(void *),
                               void *priv);
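
/*
 * Illustrative usage sketch (not part of the HIF API): registering an
 * initial wakeup handler. The callback body and the private context passed
 * through @priv are hypothetical placeholders.
 */
static inline void hif_example_initial_wakeup_cb(void *priv)
{
        /* e.g. record the wakeup event against the caller's private context */
}

static inline void
hif_example_set_wakeup_cb(struct hif_opaque_softc *hif_ctx, void *my_ctx)
{
        hif_set_initial_wakeup_cb(hif_ctx, hif_example_initial_wakeup_cb,
                                  my_ctx);
}
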
/*
 * Note: for MCL, only #if defined(HIF_CONFIG_SLUB_DEBUG_ON) needs to be
 * checked here.
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
ssize_t hif_dump_desc_trace_buf(struct device *dev,
                                struct device_attribute *attr, char *buf);
ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
                                       const char *buf, size_t size);
ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
                            const char *buf, size_t size);
ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
/**
 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
 * @hif: hif context
 * @ce_service_max_yield_time: CE service max yield time to set
 *
 * This API stores the CE service max yield time in the hif context based
 * on the ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
                                       uint32_t ce_service_max_yield_time);

/**
 * hif_get_ce_service_max_yield_time() - get CE service max yield time
 * @hif: hif context
 *
 * This API returns the CE service max yield time.
 *
 * Return: CE service max yield time
 */
unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);

/**
 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
 * @hif: hif context
 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
 *
 * This API stores the CE service max rx ind flush in the hif context based
 * on the ini value.
 *
 * Return: void
 */
void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
                                         uint8_t ce_service_max_rx_ind_flush);
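
/*
 * Illustrative usage sketch (not part of the HIF API): pushing CE service
 * tuning values read from an INI file into the HIF layer. The ini_*
 * parameters are hypothetical; only the two setters are real HIF APIs.
 */
static inline void
hif_example_apply_ce_service_cfg(struct hif_opaque_softc *hif,
                                 uint32_t ini_ce_yield_time,
                                 uint8_t ini_ce_rx_ind_flush)
{
        hif_set_ce_service_max_yield_time(hif, ini_ce_yield_time);
        hif_set_ce_service_max_rx_ind_flush(hif, ini_ce_rx_ind_flush);
}
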
#ifdef OL_ATH_SMART_LOGGING
/**
 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
 * @scn: HIF handle
 * @buf_cur: Current pointer in ring buffer
 * @buf_init: Start of the ring buffer
 * @buf_sz: Size of the ring buffer
 * @ce: Copy Engine id
 * @skb_sz: Max size of the SKB buffer to be copied
 *
 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
 * and the buffers pointed to by them into the given buf
 *
 * Return: Current pointer in ring buffer
 */
uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
                         uint8_t *buf_init, uint32_t buf_sz,
                         uint32_t ce, uint32_t skb_sz);
#endif /* OL_ATH_SMART_LOGGING */
/**
 * hif_softc_to_hif_opaque_softc() - API to convert a hif_softc handle
 * to a hif_opaque_softc handle
 * @hif_handle: hif_softc type
 *
 * Return: hif_opaque_softc type
 */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
        return (struct hif_opaque_softc *)hif_handle;
}
#ifdef FORCE_WAKE
/**
 * hif_srng_init_phase() - Indicate the SRNG initialization phase
 * so that force wake is avoided while UMAC power collapse is not yet
 * enabled
 * @hif_ctx: hif opaque handle
 * @init_phase: initialization phase
 *
 * Return: None
 */
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
                         bool init_phase);
#else
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
                         bool init_phase)
{
}
#endif /* FORCE_WAKE */
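
/*
 * Illustrative usage sketch (not part of the HIF API): bracketing SRNG setup
 * so that HIF skips force wake while UMAC power collapse is not yet
 * enabled. The SRNG configuration step itself is elided.
 */
static inline void hif_example_srng_setup(struct hif_opaque_softc *hif_ctx)
{
        hif_srng_init_phase(hif_ctx, true);
        /* ... configure the SRNG rings here ... */
        hif_srng_init_phase(hif_ctx, false);
}
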
#ifdef HIF_IPCI
/**
 * hif_shutdown_notifier_cb() - Callback for the shutdown notifier
 * @ctx: hif handle
 *
 * Return: None
 */
void hif_shutdown_notifier_cb(void *ctx);
#else
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
#endif /* HIF_IPCI */
#ifdef HIF_CE_LOG_INFO
/**
 * hif_log_ce_info() - API to log CE info
 * @scn: hif handle
 * @data: hang event data buffer
 * @offset: offset at which data needs to be written
 *
 * Return: None
 */
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
                     unsigned int *offset);
#else
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
                     unsigned int *offset)
{
}
#endif
#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_config_irq_set_perf_affinity_hint() - API to set affinity
 * @hif_ctx: hif opaque handle
 *
 * This function is used to move the WLAN IRQs to perf cores in
 * case of defconfig builds.
 *
 * Return: None
 */
void hif_config_irq_set_perf_affinity_hint(
        struct hif_opaque_softc *hif_ctx);
#else
static inline void hif_config_irq_set_perf_affinity_hint(
        struct hif_opaque_softc *hif_ctx)
{
}
#endif

#endif /* _HIF_H_ */