gsi.h
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #ifndef GSI_H
  6. #define GSI_H
  7. #include <linux/device.h>
  8. #include <linux/types.h>
  9. #include <linux/completion.h>
  10. #include <linux/mutex.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/msm_gsi.h>
  13. #include <linux/errno.h>
  14. #include <linux/ipc_logging.h>
  15. /*
  16. * The following is for adding code (i.e., for EMULATION) not found on x86.
  17. */
  18. #if defined(CONFIG_IPA_EMULATION)
  19. # include "gsi_emulation_stubs.h"
  20. #endif
  21. #define GSI_ASSERT() \
  22. BUG()
  23. #define GSI_CHAN_MAX 36
  24. #define GSI_EVT_RING_MAX 31
  25. #define GSI_NO_EVT_ERINDEX 255
  26. #define GSI_ISR_CACHE_MAX 20
  27. #define MAX_CHANNELS_SHARING_EVENT_RING 2
  28. #define GSI_IPC_LOGGING(buf, fmt, args...) \
  29. do { \
  30. if (buf) \
  31. ipc_log_string((buf), fmt, __func__, __LINE__, \
  32. ## args); \
  33. } while (0)
  34. #define GSIDBG(fmt, args...) \
  35. do { \
  36. dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
  37. ## args);\
  38. if (gsi_ctx) { \
  39. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
  40. "%s:%d " fmt, ## args); \
  41. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
  42. "%s:%d " fmt, ## args); \
  43. } \
  44. } while (0)
  45. #define GSIDBG_LOW(fmt, args...) \
  46. do { \
  47. dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
  48. ## args);\
  49. if (gsi_ctx) { \
  50. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
  51. "%s:%d " fmt, ## args); \
  52. } \
  53. } while (0)
  54. #define GSIERR(fmt, args...) \
  55. do { \
  56. dev_err(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
  57. ## args);\
  58. if (gsi_ctx) { \
  59. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
  60. "%s:%d " fmt, ## args); \
  61. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
  62. "%s:%d " fmt, ## args); \
  63. } \
  64. } while (0)
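/*
 * Usage note (illustrative, not part of the driver): GSIDBG/GSIDBG_LOW/GSIERR
 * take a printk-style format string and prepend "%s:%d " (function and line)
 * themselves, so call sites inside the GSI driver pass only their own format
 * and arguments, e.g.:
 *
 *	GSIERR("channel %u in unexpected state %u\n", chan_id, state);
 *	GSIDBG_LOW("processed %lu events\n", cnt);
 *
 * where chan_id, state and cnt are hypothetical local variables.
 */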
  65. #define GSI_IPC_LOG_PAGES 50
  66. enum gsi_ver {
  67. GSI_VER_ERR = 0,
  68. GSI_VER_1_0 = 1,
  69. GSI_VER_1_2 = 2,
  70. GSI_VER_1_3 = 3,
  71. GSI_VER_2_0 = 4,
  72. GSI_VER_2_2 = 5,
  73. GSI_VER_2_5 = 6,
  74. GSI_VER_2_7 = 7,
  75. GSI_VER_2_9 = 8,
  76. GSI_VER_2_11 = 9,
  77. GSI_VER_3_0 = 10,
  78. GSI_VER_MAX,
  79. };
  80. enum gsi_status {
  81. GSI_STATUS_SUCCESS = 0,
  82. GSI_STATUS_ERROR = 1,
  83. GSI_STATUS_RING_INSUFFICIENT_SPACE = 2,
  84. GSI_STATUS_RING_EMPTY = 3,
  85. GSI_STATUS_RES_ALLOC_FAILURE = 4,
  86. GSI_STATUS_BAD_STATE = 5,
  87. GSI_STATUS_INVALID_PARAMS = 6,
  88. GSI_STATUS_UNSUPPORTED_OP = 7,
  89. GSI_STATUS_NODEV = 8,
  90. GSI_STATUS_POLL_EMPTY = 9,
  91. GSI_STATUS_EVT_RING_INCOMPATIBLE = 10,
  92. GSI_STATUS_TIMED_OUT = 11,
  93. GSI_STATUS_AGAIN = 12,
  94. GSI_STATUS_PENDING_IRQ = 13,
  95. };
  96. enum gsi_intr_type {
  97. GSI_INTR_MSI = 0x0,
  98. GSI_INTR_IRQ = 0x1
  99. };
  100. enum gsi_evt_err {
  101. GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0,
  102. GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1,
  103. GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2,
  104. GSI_EVT_EVT_RING_EMPTY_ERR = 0x3,
  105. };
  106. /**
  107. * gsi_evt_err_notify - event ring error callback info
  108. *
  109. * @user_data: cookie supplied in gsi_alloc_evt_ring
  110. * @evt_id: type of error
  111. * @err_desc: more info about the error
  112. *
  113. */
  114. struct gsi_evt_err_notify {
  115. void *user_data;
  116. enum gsi_evt_err evt_id;
  117. uint16_t err_desc;
  118. };
  119. enum gsi_evt_chtype {
  120. GSI_EVT_CHTYPE_MHI_EV = 0x0,
  121. GSI_EVT_CHTYPE_XHCI_EV = 0x1,
  122. GSI_EVT_CHTYPE_GPI_EV = 0x2,
  123. GSI_EVT_CHTYPE_XDCI_EV = 0x3,
  124. GSI_EVT_CHTYPE_WDI2_EV = 0x4,
  125. GSI_EVT_CHTYPE_GCI_EV = 0x5,
  126. GSI_EVT_CHTYPE_WDI3_EV = 0x6,
  127. GSI_EVT_CHTYPE_MHIP_EV = 0x7,
  128. GSI_EVT_CHTYPE_AQC_EV = 0x8,
  129. GSI_EVT_CHTYPE_11AD_EV = 0x9,
  130. GSI_EVT_CHTYPE_RTK_EV = 0xC,
  131. };
  132. enum gsi_evt_ring_elem_size {
  133. GSI_EVT_RING_RE_SIZE_4B = 4,
  134. GSI_EVT_RING_RE_SIZE_8B = 8,
  135. GSI_EVT_RING_RE_SIZE_16B = 16,
  136. GSI_EVT_RING_RE_SIZE_32B = 32,
  137. };
  138. /**
  139. * gsi_evt_ring_props - Event ring related properties
  140. *
  141. * @intf: interface type (of the associated channel)
  142. * @intr: interrupt type
  143. * @re_size: size of event ring element
  144. * @ring_len: length of ring in bytes (must be integral multiple of
  145. * re_size)
  146. * @ring_base_addr: physical base address of ring. Address must be aligned to
  147. * ring_len rounded to power of two
  148. * @ring_base_vaddr: virtual base address of ring (set to NULL when not
  149. * applicable)
  150. * @int_modt: cycle-based interrupt moderation (32KHz clock)
  151. * @int_modc: interrupt moderation packet counter
  152. * @intvec: write data for MSI write
  153. * @msi_addr: MSI address
  154. * @rp_update_addr: physical address to which event read pointer should be
  155. * written on every event generation. must be set to 0 when
  156. * no update is desired
  157. * @rp_update_vaddr: virtual address of event ring read pointer (set to NULL
  158. * when not applicable)
  159. * @exclusive: if true, only one GSI channel can be associated with this
  160. * event ring. if false, the event ring can be shared among
  161. * multiple GSI channels but in that case no polling
  162. * (GSI_CHAN_MODE_POLL) is supported on any of those channels
  163. * @err_cb: error notification callback
  164. * @user_data: cookie used for error notifications
  165. * @evchid_valid: is evchid valid?
  166. * @evchid: the event ID that is being specifically requested (this is
  167. * relevant for MHI where doorbell routing requires ERs to be
  168. * physically contiguous)
  169. * @gsi_read_event_ring_rp: function reads the value of the event ring RP.
  170. */
  171. struct gsi_evt_ring_props {
  172. enum gsi_evt_chtype intf;
  173. enum gsi_intr_type intr;
  174. enum gsi_evt_ring_elem_size re_size;
  175. uint32_t ring_len;
  176. uint64_t ring_base_addr;
  177. void *ring_base_vaddr;
  178. uint16_t int_modt;
  179. uint8_t int_modc;
  180. uint32_t intvec;
  181. uint64_t msi_addr;
  182. uint64_t rp_update_addr;
  183. void *rp_update_vaddr;
  184. bool exclusive;
  185. void (*err_cb)(struct gsi_evt_err_notify *notify);
  186. void *user_data;
  187. bool evchid_valid;
  188. uint8_t evchid;
  189. uint64_t (*gsi_read_event_ring_rp)(struct gsi_evt_ring_props *props,
  190. uint8_t id, int ee);
  191. };
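/*
 * Illustrative sketch (not part of the driver): filling a minimal
 * gsi_evt_ring_props for an exclusive GPI event ring that uses a line
 * interrupt. The ring allocation (ring_dma, ring_va), the error callback
 * (my_evt_err_cb) and the cookie (my_ctx) are hypothetical placeholders;
 * the populated structure would then be handed to gsi_alloc_evt_ring().
 *
 *	struct gsi_evt_ring_props evt_props;
 *
 *	memset(&evt_props, 0, sizeof(evt_props));
 *	evt_props.intf = GSI_EVT_CHTYPE_GPI_EV;
 *	evt_props.intr = GSI_INTR_IRQ;
 *	evt_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
 *	evt_props.ring_len = 256 * GSI_EVT_RING_RE_SIZE_16B;
 *	evt_props.ring_base_addr = ring_dma;
 *	evt_props.ring_base_vaddr = ring_va;
 *	evt_props.int_modt = 32;	// 32 cycles of the 32KHz clock ~= 1 ms
 *	evt_props.int_modc = 1;
 *	evt_props.exclusive = true;
 *	evt_props.err_cb = my_evt_err_cb;
 *	evt_props.user_data = my_ctx;
 */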
  192. enum gsi_chan_mode {
  193. GSI_CHAN_MODE_CALLBACK = 0x0,
  194. GSI_CHAN_MODE_POLL = 0x1,
  195. };
  196. enum gsi_chan_prot {
  197. GSI_CHAN_PROT_MHI = 0x0,
  198. GSI_CHAN_PROT_XHCI = 0x1,
  199. GSI_CHAN_PROT_GPI = 0x2,
  200. GSI_CHAN_PROT_XDCI = 0x3,
  201. GSI_CHAN_PROT_WDI2 = 0x4,
  202. GSI_CHAN_PROT_GCI = 0x5,
  203. GSI_CHAN_PROT_WDI3 = 0x6,
  204. GSI_CHAN_PROT_MHIP = 0x7,
  205. GSI_CHAN_PROT_AQC = 0x8,
  206. GSI_CHAN_PROT_11AD = 0x9,
  207. GSI_CHAN_PROT_MHIC = 0xA,
  208. GSI_CHAN_PROT_QDSS = 0xB,
  209. GSI_CHAN_PROT_RTK = 0xC,
  210. };
  211. enum gsi_max_prefetch {
  212. GSI_ONE_PREFETCH_SEG = 0x0,
  213. GSI_TWO_PREFETCH_SEG = 0x1
  214. };
  215. enum gsi_per_evt {
  216. GSI_PER_EVT_GLOB_ERROR,
  217. GSI_PER_EVT_GLOB_GP1,
  218. GSI_PER_EVT_GLOB_GP2,
  219. GSI_PER_EVT_GLOB_GP3,
  220. GSI_PER_EVT_GENERAL_BREAK_POINT,
  221. GSI_PER_EVT_GENERAL_BUS_ERROR,
  222. GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW,
  223. GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW,
  224. };
  225. /**
  226. * gsi_per_notify - Peripheral callback info
  227. *
  228. * @user_data: cookie supplied in gsi_register_device
  229. * @evt_id: type of notification
  230. * @err_desc: error related information
  231. *
  232. */
  233. struct gsi_per_notify {
  234. void *user_data;
  235. enum gsi_per_evt evt_id;
  236. union {
  237. uint16_t err_desc;
  238. } data;
  239. };
  240. /**
  241. * gsi_per_props - Peripheral related properties
  242. *
  243. * @ver: GSI core version
  244. * @ee: EE where this driver and peripheral driver runs
  245. * @intr: control interrupt type
  246. * @intvec: write data for MSI write
  247. * @msi_addr: MSI address
  248. * @irq: IRQ number
  249. * @phys_addr: physical address of GSI block
  250. * @size: register size of GSI block
  251. * @emulator_intcntrlr_addr: the location of emulator's interrupt control block
  252. * @emulator_intcntrlr_size: the size of the emulator_intcntrlr_addr region
  253. * @emulator_intcntrlr_client_isr: client's ISR, called by the emulator's ISR
  254. * @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits
  255. * @mhi_er_id_limits: MHI event ring start and end ids
  256. * @notify_cb: general notification callback
  257. * @req_clk_cb: callback to request peripheral clock
  258. * granted should be set to true if request is completed
  259. * synchronously, false otherwise (peripheral needs
  260. * to call gsi_complete_clk_grant later when request is
  261. * completed)
  262. * if this callback is not provided, then GSI will assume
  263. * peripheral is clocked at all times
  264. * @rel_clk_cb: callback to release peripheral clock
  265. * @user_data: cookie used for notifications
  266. * @clk_status_cb: callback to update the current msm bus clock vote
  267. * @enable_clk_bug_on: enable IPA clock for dump saving before assert
  268. * @skip_ieob_mask_wa: flag for skipping ieob_mask_wa
  269. * @tx_poll: propagate to relevant gsi channels that tx polling feature is on
  270. *
  271. * All the callbacks are in interrupt context
  272. */
  273. struct gsi_per_props {
  274. enum gsi_ver ver;
  275. unsigned int ee;
  276. enum gsi_intr_type intr;
  277. uint32_t intvec;
  278. uint64_t msi_addr;
  279. unsigned int irq;
  280. phys_addr_t phys_addr;
  281. unsigned long size;
  282. phys_addr_t emulator_intcntrlr_addr;
  283. unsigned long emulator_intcntrlr_size;
  284. irq_handler_t emulator_intcntrlr_client_isr;
  285. bool mhi_er_id_limits_valid;
  286. uint32_t mhi_er_id_limits[2];
  287. void (*notify_cb)(struct gsi_per_notify *notify);
  288. void (*req_clk_cb)(void *user_data, bool *granted);
  289. int (*rel_clk_cb)(void *user_data);
  290. void *user_data;
  291. int (*clk_status_cb)(void);
  292. void (*enable_clk_bug_on)(void);
  293. bool skip_ieob_mask_wa;
  294. bool tx_poll;
  295. };
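/*
 * Illustrative sketch (not part of the driver): a minimal gsi_per_props for
 * registering with the GSI layer via gsi_register_device(). The register
 * base, size and IRQ (res_base, res_size, gsi_irq), the notification
 * callback and the cookie are hypothetical placeholders.
 *
 *	struct gsi_per_props per_props;
 *
 *	memset(&per_props, 0, sizeof(per_props));
 *	per_props.ver = GSI_VER_2_5;
 *	per_props.ee = 0;			// hypothetical EE index
 *	per_props.intr = GSI_INTR_IRQ;
 *	per_props.irq = gsi_irq;
 *	per_props.phys_addr = res_base;
 *	per_props.size = res_size;
 *	per_props.notify_cb = my_notify_cb;
 *	per_props.user_data = my_ctx;
 *	// req_clk_cb left NULL: GSI then assumes the peripheral is always clocked
 */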
  296. enum gsi_chan_evt {
  297. GSI_CHAN_EVT_INVALID = 0x0,
  298. GSI_CHAN_EVT_SUCCESS = 0x1,
  299. GSI_CHAN_EVT_EOT = 0x2,
  300. GSI_CHAN_EVT_OVERFLOW = 0x3,
  301. GSI_CHAN_EVT_EOB = 0x4,
  302. GSI_CHAN_EVT_OOB = 0x5,
  303. GSI_CHAN_EVT_DB_MODE = 0x6,
  304. GSI_CHAN_EVT_UNDEFINED = 0x10,
  305. GSI_CHAN_EVT_RE_ERROR = 0x11,
  306. };
  307. /**
  308. * gsi_chan_xfer_veid - Virtual Channel ID
  309. *
  310. * @GSI_VEID_0: transfer completed for VEID 0
  311. * @GSI_VEID_1: transfer completed for VEID 1
  312. * @GSI_VEID_2: transfer completed for VEID 2
  313. * @GSI_VEID_3: transfer completed for VEID 3
  314. * @GSI_VEID_DEFAULT: used when veid is invalid
  315. */
  316. enum gsi_chan_xfer_veid {
  317. GSI_VEID_0 = 0,
  318. GSI_VEID_1 = 1,
  319. GSI_VEID_2 = 2,
  320. GSI_VEID_3 = 3,
  321. GSI_VEID_DEFAULT,
  322. GSI_VEID_MAX
  323. };
  324. /**
  325. * gsi_chan_xfer_notify - Channel callback info
  326. *
  327. * @chan_user_data: cookie supplied in gsi_alloc_channel
  328. * @xfer_user_data: cookie of the gsi_xfer_elem that caused the
  329. * event to be generated
  330. * @evt_id: type of event triggered by the associated TRE
  331. * (corresponding to xfer_user_data)
  332. * @bytes_xfered: number of bytes transferred by the associated TRE
  333. * (corresponding to xfer_user_data)
  334. * @veid: virtual endpoint id. Valid for GCI completions only
  335. *
  336. */
  337. struct gsi_chan_xfer_notify {
  338. void *chan_user_data;
  339. void *xfer_user_data;
  340. enum gsi_chan_evt evt_id;
  341. uint16_t bytes_xfered;
  342. uint8_t veid;
  343. };
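/*
 * Illustrative sketch (not part of the driver): a transfer-completion
 * callback of the form expected in gsi_chan_props.xfer_cb. my_tx_desc and
 * my_complete_desc() are hypothetical; the per-transfer descriptor is
 * assumed to have been stored as the xfer_user_data cookie.
 *
 *	static void my_xfer_cb(struct gsi_chan_xfer_notify *notify)
 *	{
 *		struct my_tx_desc *desc = notify->xfer_user_data;
 *
 *		switch (notify->evt_id) {
 *		case GSI_CHAN_EVT_EOT:
 *		case GSI_CHAN_EVT_EOB:
 *			// notify->bytes_xfered bytes completed for this TRE
 *			my_complete_desc(desc, notify->bytes_xfered);
 *			break;
 *		default:
 *			pr_err("unexpected event %d\n", notify->evt_id);
 *			break;
 *		}
 *	}
 */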
  344. enum gsi_chan_err {
  345. GSI_CHAN_INVALID_TRE_ERR = 0x0,
  346. GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1,
  347. GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2,
  348. GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3,
  349. GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
  350. GSI_CHAN_HWO_1_ERR = 0x5
  351. };
  352. /**
  353. * gsi_chan_err_notify - Channel general callback info
  354. *
  355. * @chan_user_data: cookie supplied in gsi_alloc_channel
  356. * @evt_id: type of error
  357. * @err_desc: more info about the error
  358. *
  359. */
  360. struct gsi_chan_err_notify {
  361. void *chan_user_data;
  362. enum gsi_chan_err evt_id;
  363. uint16_t err_desc;
  364. };
  365. enum gsi_chan_ring_elem_size {
  366. GSI_CHAN_RE_SIZE_4B = 4,
  367. GSI_CHAN_RE_SIZE_8B = 8,
  368. GSI_CHAN_RE_SIZE_16B = 16,
  369. GSI_CHAN_RE_SIZE_32B = 32,
  370. GSI_CHAN_RE_SIZE_64B = 64,
  371. };
  372. enum gsi_chan_use_db_eng {
  373. GSI_CHAN_DIRECT_MODE = 0x0,
  374. GSI_CHAN_DB_MODE = 0x1,
  375. };
  376. /**
  377. * gsi_chan_props - Channel related properties
  378. *
  379. * @prot: interface type
  380. * @dir: channel direction
  381. * @ch_id: virtual channel ID
  382. * @evt_ring_hdl: handle of associated event ring. set to ~0 if no
  383. * event ring associated
  384. * @re_size: size of channel ring element
  385. * @ring_len: length of ring in bytes (must be integral multiple of
  386. * re_size)
  387. * @max_re_expected: maximal number of ring elements expected to be queued;
  388. * used for data path statistics gathering. If 0 is provided,
  389. * ring_len / re_size will be used.
  390. * @ring_base_addr: physical base address of ring. Address must be aligned to
  391. * ring_len rounded to power of two
  392. * @ring_base_vaddr: virtual base address of ring (set to NULL when not
  393. * applicable)
  394. * @use_db_eng: 0 => direct mode (doorbells are written directly to RE
  395. * engine)
  396. * 1 => DB mode (doorbells are written to DB engine)
  397. * @max_prefetch: limit number of pre-fetch segments for channel
  398. * @low_weight: low channel weight (priority of channel for RE engine
  399. * round robin algorithm); must be >= 1
  400. * @empty_lvl_threshold:
  401. * The threshold number of free entries available in the
  402. * receiving FIFOs of the GSI peripheral. If Smart PF mode
  403. * is used, REE will fetch/send a new TRE to the peripheral only
  404. * if the peripheral's empty_level_count is higher than
  405. * EMPTY_LVL_THRSHOLD defined for this channel
  406. * @tx_poll: channel processes completions in NAPI context
  407. * @xfer_cb: transfer notification callback, this callback happens
  408. * on event boundaries
  409. *
  410. * e.g. 1
  411. *
  412. * out TD with 3 REs
  413. *
  414. * RE1: EOT=0, EOB=0, CHAIN=1;
  415. * RE2: EOT=0, EOB=0, CHAIN=1;
  416. * RE3: EOT=1, EOB=0, CHAIN=0;
  417. *
  418. * the callback will be triggered for RE3 using the
  419. * xfer_user_data of that RE
  420. *
  421. * e.g. 2
  422. *
  423. * in REs
  424. *
  425. * RE1: EOT=1, EOB=0, CHAIN=0;
  426. * RE2: EOT=1, EOB=0, CHAIN=0;
  427. * RE3: EOT=1, EOB=0, CHAIN=0;
  428. *
  429. * received packet consumes all of RE1, RE2 and part of RE3
  430. * for EOT condition. there will be three callbacks in below
  431. * order
  432. *
  433. * callback for RE1 using GSI_CHAN_EVT_OVERFLOW
  434. * callback for RE2 using GSI_CHAN_EVT_OVERFLOW
  435. * callback for RE3 using GSI_CHAN_EVT_EOT
  436. *
  437. * @err_cb: error notification callback
  438. * @cleanup_cb: cleanup rx-pkt/skb callback
  439. * @chan_user_data: cookie used for notifications
  440. *
  441. * All the callbacks are in interrupt context
  442. *
  443. */
  444. struct gsi_chan_props {
  445. enum gsi_chan_prot prot;
  446. enum gsi_chan_dir dir;
  447. uint8_t ch_id;
  448. unsigned long evt_ring_hdl;
  449. enum gsi_chan_ring_elem_size re_size;
  450. uint16_t ring_len;
  451. uint16_t max_re_expected;
  452. uint64_t ring_base_addr;
  453. uint8_t db_in_bytes;
  454. void *ring_base_vaddr;
  455. enum gsi_chan_use_db_eng use_db_eng;
  456. enum gsi_max_prefetch max_prefetch;
  457. uint8_t low_weight;
  458. enum gsi_prefetch_mode prefetch_mode;
  459. uint8_t empty_lvl_threshold;
  460. bool tx_poll;
  461. void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
  462. void (*err_cb)(struct gsi_chan_err_notify *notify);
  463. void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data);
  464. void *chan_user_data;
  465. };
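/*
 * Illustrative sketch (not part of the driver): a minimal gsi_chan_props for
 * a GPI channel in direct doorbell mode, tied to a previously allocated
 * event ring. evt_hdl, ch_ring_dma, ch_ring_va and the callbacks are
 * hypothetical placeholders.
 *
 *	struct gsi_chan_props ch_props;
 *
 *	memset(&ch_props, 0, sizeof(ch_props));
 *	ch_props.prot = GSI_CHAN_PROT_GPI;
 *	// ch_props.dir must also be set; gsi_chan_dir enumerators live in msm_gsi.h
 *	ch_props.ch_id = 5;			// hardware channel number
 *	ch_props.evt_ring_hdl = evt_hdl;	// from the event ring allocation
 *	ch_props.re_size = GSI_CHAN_RE_SIZE_16B;
 *	ch_props.ring_len = 512 * GSI_CHAN_RE_SIZE_16B;
 *	ch_props.ring_base_addr = ch_ring_dma;
 *	ch_props.ring_base_vaddr = ch_ring_va;
 *	ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
 *	ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
 *	ch_props.low_weight = 1;
 *	ch_props.xfer_cb = my_xfer_cb;
 *	ch_props.err_cb = my_chan_err_cb;
 *	ch_props.chan_user_data = my_ctx;
 */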
  466. enum gsi_xfer_flag {
  467. GSI_XFER_FLAG_CHAIN = 0x1,
  468. GSI_XFER_FLAG_EOB = 0x100,
  469. GSI_XFER_FLAG_EOT = 0x200,
  470. GSI_XFER_FLAG_BEI = 0x400
  471. };
  472. enum gsi_xfer_elem_type {
  473. GSI_XFER_ELEM_DATA,
  474. GSI_XFER_ELEM_IMME_CMD,
  475. GSI_XFER_ELEM_NOP,
  476. };
  477. /**
  478. * gsi_gpi_channel_scratch - GPI protocol SW config area of
  479. * channel scratch
  480. *
  481. * @dl_nlo_channel: Whether this is a DL NLO channel. Relevant for
  482. * GSI 2.5 and above, where DL NLO was introduced.
  483. * @max_outstanding_tre: Used for the prefetch management sequence by the
  484. * sequencer. Defines the maximum number of allowed
  485. * outstanding TREs in IPA/GSI (in bytes). RE engine
  486. * prefetch will be limited by this configuration. It
  487. * is suggested to configure this value to the IPA_IF
  488. * channel TLV queue size times the element size. To disable
  489. * the feature in doorbell mode (DB Mode=1), maximum
  490. * outstanding TREs should be set to 64KB
  491. * (or any value larger than or equal to the ring length, RLEN).
  492. * The field is irrelevant starting with GSI 2.5, where smart
  493. * prefetch is implemented by the H/W.
  494. * @outstanding_threshold: Used for the prefetch management sequence by the
  495. * sequencer. Defines the threshold (in bytes) as to when
  496. * to update the channel doorbell. Should be smaller than
  497. * the maximum outstanding TREs value. It is suggested to
  498. * configure this value to 2 * element size.
  499. * The field is irrelevant starting with GSI 2.5, where smart
  500. * prefetch is implemented by the H/W.
  501. */
  502. struct __packed gsi_gpi_channel_scratch {
  503. uint64_t dl_nlo_channel:1; /* Relevant starting GSI 2.5 */
  504. uint64_t resvd1:63;
  505. uint32_t resvd2:16;
  506. uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */
  507. uint32_t resvd3:16;
  508. uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */
  509. };
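/*
 * Worked example (illustrative, not part of the driver): the suggested
 * pre-GSI-2.5 prefetch configuration for the GPI scratch, assuming a
 * hypothetical IPA_IF channel with a TLV queue of 16 entries and 16B ring
 * elements. Per the guidance above, max_outstanding_tre = 16 * 16 = 256
 * bytes and outstanding_threshold = 2 * 16 = 32 bytes.
 *
 *	struct gsi_gpi_channel_scratch gpi;
 *
 *	memset(&gpi, 0, sizeof(gpi));
 *	gpi.max_outstanding_tre = 16 * 16;
 *	gpi.outstanding_threshold = 2 * 16;
 */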
  510. /**
  511. * gsi_mhi_channel_scratch - MHI protocol SW config area of
  512. * channel scratch
  513. *
  514. * @mhi_host_wp_addr: Valid only when UL/DL Sync En is asserted. Defines
  515. * address in host from which channel write pointer
  516. * should be read in polling mode
  517. * @assert_bit40: 1: bit #41 in address should be asserted upon
  518. * IPA_IF.ProcessDescriptor routine (for MHI over PCIe
  519. * transfers)
  520. * 0: bit #41 in address should be deasserted upon
  521. * IPA_IF.ProcessDescriptor routine (for non-MHI over
  522. * PCIe transfers)
  523. * @polling_configuration: Uplink channels: Defines timer to poll on MHI
  524. * context. Range: 1 to 31 milliseconds.
  525. * Downlink channel: Defines transfer ring buffer
  526. * availability threshold to poll on MHI context in
  527. * multiple of 8. Range: 0 to 31, meaning 0 to 258 ring
  528. * elements. E.g., value of 2 indicates 16 ring elements.
  529. * Valid only when Burst Mode Enabled is set to 1
  530. * @burst_mode_enabled: 0: Burst mode is disabled for this channel
  531. * 1: Burst mode is enabled for this channel
  532. * @polling_mode: 0: the channel is not in polling mode, meaning the
  533. * host should ring DBs.
  534. * 1: the channel is in polling mode, meaning the host
  535. * should not ring DBs until notified of DB mode/OOB mode
  536. * @oob_mod_threshold: Defines OOB moderation threshold. Units are in 8
  537. * ring elements.
  538. * @max_outstanding_tre: Used for the prefetch management sequence by the
  539. * sequencer. Defines the maximum number of allowed
  540. * outstanding TREs in IPA/GSI (in bytes). RE engine
  541. * prefetch will be limited by this configuration. It
  542. * is suggested to configure this value to the IPA_IF
  543. * channel TLV queue size times the element size.
  544. * To disable the feature in doorbell mode (DB Mode=1),
  545. * maximum outstanding TREs should be set to 64KB
  546. * (or any value larger than or equal to the ring length, RLEN).
  547. * The field is irrelevant starting with GSI 2.5, where smart
  548. * prefetch is implemented by the H/W.
  549. * @outstanding_threshold: Used for the prefetch management sequence by the
  550. * sequencer. Defines the threshold (in bytes) as to when
  551. * to update the channel doorbell. Should be smaller than
  552. * the maximum outstanding TREs value. It is suggested to
  553. * configure this value to min(TLV_FIFO_SIZE/2,8) *
  554. * element size.
  555. * The field is irrelevant starting with GSI 2.5, where smart
  556. * prefetch is implemented by the H/W.
  557. */
  558. struct __packed gsi_mhi_channel_scratch {
  559. uint64_t mhi_host_wp_addr;
  560. uint32_t rsvd1:1;
  561. uint32_t assert_bit40:1;
  562. uint32_t polling_configuration:5;
  563. uint32_t burst_mode_enabled:1;
  564. uint32_t polling_mode:1;
  565. uint32_t oob_mod_threshold:5;
  566. uint32_t resvd2:2;
  567. uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */
  568. uint32_t resvd3:16;
  569. uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */
  570. };
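/*
 * Worked example (illustrative, not part of the driver), following the
 * @polling_configuration description above: on a downlink channel the value
 * is expressed in multiples of 8 ring elements, so writing 2 asks for a poll
 * of the MHI context once 2 * 8 = 16 transfer ring elements are available;
 * on an uplink channel the same field is instead a poll timer of 2 ms.
 *
 *	struct gsi_mhi_channel_scratch mhi;
 *
 *	memset(&mhi, 0, sizeof(mhi));
 *	mhi.burst_mode_enabled = 1;	// polling_configuration is only valid
 *					// when burst mode is enabled
 *	mhi.polling_configuration = 2;
 */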
  571. /**
  572. * gsi_mhi_channel_scratch_v2 - MHI protocol SW config area of
  573. * channel scratch
  574. *
  575. * @mhi_host_wp_addr_lo: Valid only when UL/DL Sync En is asserted. Defines
  576. * address in host from which channel write pointer
  577. * should be read in polling mode
  578. * @mhi_host_wp_addr_hi: Valid only when UL/DL Sync En is asserted. Defines
  579. * address in host from which channel write pointer
  580. * should be read in polling mode
  581. * @assert_bit40: 1: bit #41 in address should be asserted upon
  582. * IPA_IF.ProcessDescriptor routine (for MHI over PCIe
  583. * transfers)
  584. * 0: bit #41 in address should be deasserted upon
  585. * IPA_IF.ProcessDescriptor routine (for non-MHI over
  586. * PCIe transfers)
  587. * @polling_configuration: Uplink channels: Defines timer to poll on MHI
  588. * context. Range: 1 to 31 milliseconds.
  589. * Downlink channel: Defines transfer ring buffer
  590. * availability threshold to poll on MHI context in
  591. * multiple of 8. Range: 0 to 31, meaning 0 to 258 ring
  592. * elements. E.g., value of 2 indicates 16 ring elements.
  593. * Valid only when Burst Mode Enabled is set to 1
  594. * @burst_mode_enabled: 0: Burst mode is disabled for this channel
  595. * 1: Burst mode is enabled for this channel
  596. * @polling_mode: 0: the channel is not in polling mode, meaning the
  597. * host should ring DBs.
  598. * 1: the channel is in polling mode, meaning the host
  599. * should not ring DBs until notified of DB mode/OOB mode
  600. * @oob_mod_threshold: Defines OOB moderation threshold. Units are in 8
  601. * ring elements.
  602. */
  603. struct __packed gsi_mhi_channel_scratch_v2 {
  604. uint32_t mhi_host_wp_addr_lo;
  605. uint32_t mhi_host_wp_addr_hi : 9;
  606. uint32_t polling_configuration : 5;
  607. uint32_t rsvd1 : 18;
  608. uint32_t rsvd2 : 1;
  609. uint32_t assert_bit40 : 1;
  610. uint32_t resvd3 : 5;
  611. uint32_t burst_mode_enabled : 1;
  612. uint32_t polling_mode : 1;
  613. uint32_t oob_mod_threshold : 5;
  614. uint32_t resvd4 : 18; /* Not configured by AP */
  615. uint32_t resvd5; /* Not configured by AP */
  616. };
  617. /**
  618. * gsi_xdci_channel_scratch - xDCI protocol SW config area of
  619. * channel scratch
  620. *
  621. * @const_buffer_size: TRB buffer size in KB (similar to the IPA aggregation
  622. * configuration). Must be aligned to Max USB Packet Size.
  623. * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned
  624. * transfer resource index for the transfer, which was
  625. * returned in response to the Start Transfer command.
  626. * This field is used for "Update Transfer" command
  627. * @last_trb_addr: Address (LSB - based on alignment restrictions) of
  628. * last TRB in queue. Used to identify rollover case
  629. * @depcmd_low_addr: Used to generate "Update Transfer" command
  630. * @max_outstanding_tre: Used for the prefetch management sequence by the
  631. * sequencer. Defines the maximum number of allowed
  632. * outstanding TREs in IPA/GSI (in bytes). RE engine
  633. * prefetch will be limited by this configuration. It
  634. * is suggested to configure this value to the IPA_IF
  635. * channel TLV queue size times the element size.
  636. * To disable the feature in doorbell mode (DB Mode=1),
  637. * maximum outstanding TREs should be set to 64KB
  638. * (or any value larger than or equal to the ring length, RLEN).
  639. * The field is irrelevant starting with GSI 2.5, where smart
  640. * prefetch is implemented by the H/W.
  641. * @depcmd_hi_addr: Used to generate "Update Transfer" command
  642. * @outstanding_threshold: Used for the prefetch management sequence by the
  643. * sequencer. Defines the threshold (in bytes) as to when
  644. * to update the channel doorbell. Should be smaller than
  645. * the maximum outstanding TREs value. It is suggested to
  646. * configure this value to 2 * element size; for MBIM the
  647. * suggested configuration is the element size.
  648. * The field is irrelevant starting with GSI 2.5, where smart
  649. * prefetch is implemented by the H/W.
  650. */
  651. struct __packed gsi_xdci_channel_scratch {
  652. uint32_t last_trb_addr:16;
  653. uint32_t resvd1:4;
  654. uint32_t xferrscidx:7;
  655. uint32_t const_buffer_size:5;
  656. uint32_t depcmd_low_addr;
  657. uint32_t depcmd_hi_addr:8;
  658. uint32_t resvd2:8;
  659. uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */
  660. uint32_t resvd3:16;
  661. uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */
  662. };
  663. /**
  664. * gsi_wdi_channel_scratch - WDI protocol SW config area of
  665. * channel scratch
  666. *
  667. * @wifi_rx_ri_addr_low: Low 32 bits of Transfer ring Read Index address.
  668. * @wifi_rx_ri_addr_high: High 32 bits of Transfer ring Read Index address.
  669. * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index
  670. * N is the number of packets that IPA will
  671. * process before Wifi transfer ring Ri will
  672. * be updated.
  673. * @update_ri_moderation_counter: This field is incremented with each TRE
  674. * processed in MCS.
  675. * @wdi_rx_tre_proc_in_progress: It is set if IPA IF returned BECAME FULL
  676. * status after MCS submitted an inline immediate
  677. * command to update the metadata. It allows MCS
  678. * to know that it has to retry sending the TRE
  679. * to IPA.
  680. * @wdi_rx_vdev_id: Rx only. Initialized to 0xFF by SW after allocating channel
  681. * and before starting it. Both FW_DESC and VDEV_ID are part
  682. * of a scratch word that is Read/Write for both MCS and SW.
  683. * To avoid race conditions, SW should not update this field
  684. * after starting the channel.
  685. * @wdi_rx_fw_desc: Rx only. Initialized to 0xFF by SW after allocating channel
  686. * and before starting it. After Start, this is a Read only
  687. * field for SW.
  688. * @endp_metadatareg_offset: Rx only, the offset of IPA_ENDP_INIT_HDR_METADATA
  689. * of the corresponding endpoint in 4B words from IPA
  690. * base address. Read only field for MCS.
  691. * Write for SW.
  692. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field
  693. * for MCS. Write for SW.
  694. * @wdi_rx_pkt_length: If WDI_RX_TRE_PROC_IN_PROGRESS is set, this field is
  695. * valid and contains the packet length of the TRE that
  696. * needs to be submitted to IPA.
  697. * @resv1: reserved bits.
  698. * @pkt_comp_count: It is incremented on each AOS received. When event ring
  699. * Write index is updated, it is decremented by the same
  700. * amount.
  701. * @stop_in_progress_stm: If a Stop request is in progress, this will indicate
  702. * the current stage of processing of the stop within MCS
  703. * @resv2: reserved bits.
  704. * @wdi_rx_qmap_id_internal: Initialized to 0 by MCS when the channel is
  705. * allocated. It is updated to the current value of SW
  706. * QMAP ID that is being written by MCS to the IPA
  707. * metadata register.
  708. */
  709. struct __packed gsi_wdi_channel_scratch {
  710. uint32_t wifi_rx_ri_addr_low;
  711. uint32_t wifi_rx_ri_addr_high;
  712. uint32_t update_ri_moderation_threshold:5;
  713. uint32_t update_ri_moderation_counter:6;
  714. uint32_t wdi_rx_tre_proc_in_progress:1;
  715. uint32_t resv1:4;
  716. uint32_t wdi_rx_vdev_id:8;
  717. uint32_t wdi_rx_fw_desc:8;
  718. uint32_t endp_metadatareg_offset:16;
  719. uint32_t qmap_id:16;
  720. uint32_t wdi_rx_pkt_length:16;
  721. uint32_t resv2:2;
  722. uint32_t pkt_comp_count:11;
  723. uint32_t stop_in_progress_stm:3;
  724. uint32_t resv3:16;
  725. uint32_t wdi_rx_qmap_id_internal:16;
  726. };
  727. /**
  728. * gsi_wdi2_channel_scratch_new - WDI protocol SW config area of
  729. * channel scratch
  730. *
  731. * @wifi_rx_ri_addr_low: Low 32 bits of Transfer ring Read Index address.
  732. * @wifi_rx_ri_addr_high: High 32 bits of Transfer ring Read Index address.
  733. * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index
  734. * N is the number of packets that IPA will
  735. * process before Wifi transfer ring Ri will
  736. * be updated.
  737. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field
  738. * for MCS. Write for SW.
  739. * @endp_metadatareg_offset: Rx only, the offset of IPA_ENDP_INIT_HDR_METADATA
  740. * of the corresponding endpoint in 4B words from IPA
  741. * base address. Read only field for MCS.
  742. * Write for SW.
  743. * @wdi_rx_vdev_id: Rx only. Initialized to 0xFF by SW after allocating channel
  744. * and before starting it. Both FW_DESC and VDEV_ID are part
  745. * of a scratch word that is Read/Write for both MCS and SW.
  746. * To avoid race conditions, SW should not update this field
  747. * after starting the channel.
  748. * @wdi_rx_fw_desc: Rx only. Initialized to 0xFF by SW after allocating channel
  749. * and before starting it. After Start, this is a Read only
  750. * field for SW.
  751. * @update_ri_moderation_counter: This field is incremented with each TRE
  752. * processed in MCS.
  753. * @wdi_rx_tre_proc_in_progress: It is set if IPA IF returned BECAME FULL
  754. * status after MCS submitted an inline immediate
  755. * command to update the metadata. It allows MCS
  756. * to know that it has to retry sending the TRE
  757. * to IPA.
  758. * @outstanding_tlvs_counter: It is the count of outstanding TLVs submitted to
  759. * IPA by MCS and waiting for AOS completion from IPA.
  760. * @wdi_rx_pkt_length: If WDI_RX_TRE_PROC_IN_PROGRESS is set, this field is
  761. * valid and contains the packet length of the TRE that
  762. * needs to be submitted to IPA.
  763. * @resv1: reserved bits.
  764. * @pkt_comp_count: It is incremented on each AOS received. When event ring
  765. * Write index is updated, it is decremented by the same
  766. * amount.
  767. * @stop_in_progress_stm: If a Stop request is in progress, this will indicate
  768. * the current stage of processing of the stop within MCS
  769. * @resv2: reserved bits.
  770. * @wdi_rx_qmap_id_internal: Initialized to 0 by MCS when the channel is
  771. * allocated. It is updated to the current value of SW
  772. * QMAP ID that is being written by MCS to the IPA
  773. * metadata register.
  774. */
  775. struct __packed gsi_wdi2_channel_scratch_new {
  776. uint32_t wifi_rx_ri_addr_low;
  777. uint32_t wifi_rx_ri_addr_high;
  778. uint32_t update_ri_moderation_threshold:5;
  779. uint32_t qmap_id:8;
  780. uint32_t resv1:3;
  781. uint32_t endp_metadatareg_offset:16;
  782. uint32_t wdi_rx_vdev_id:8;
  783. uint32_t wdi_rx_fw_desc:8;
  784. uint32_t update_ri_moderation_counter:6;
  785. uint32_t wdi_rx_tre_proc_in_progress:1;
  786. uint32_t resv4:1;
  787. uint32_t outstanding_tlvs_counter:8;
  788. uint32_t wdi_rx_pkt_length:16;
  789. uint32_t resv2:2;
  790. uint32_t pkt_comp_count:11;
  791. uint32_t stop_in_progress_stm:3;
  792. uint32_t resv3:16;
  793. uint32_t wdi_rx_qmap_id_internal:16;
  794. };
  795. /**
  796. * gsi_mhip_channel_scratch - MHI PRIME protocol SW config area of
  797. * channel scratch
  798. * @assert_bit_40: Valid only for non-host channels.
  799. * Set to 1 for MHI’ channels when running over PCIe.
  800. * @host_channel: Set to 1 for MHIP channel running on host.
  801. *
  802. */
  803. struct __packed gsi_mhip_channel_scratch {
  804. uint32_t assert_bit_40:1;
  805. uint32_t host_channel:1;
  806. uint32_t resvd1:30;
  807. };
  808. /**
  809. * gsi_11ad_rx_channel_scratch - 11AD protocol SW config area of
  810. * RX channel scratch
  811. *
  812. * @status_ring_hwtail_address_lsb: Low 32 bits of status ring hwtail address.
  813. * @status_ring_hwtail_address_msb: High 32 bits of status ring hwtail address.
  814. * @data_buffers_base_address_lsb: Low 32 bits of the data buffers address.
  815. * @data_buffers_base_address_msb: High 32 bits of the data buffers address.
  816. * @fixed_data_buffer_size: the fixed buffer size (> MTU).
  817. * @resv1: reserved bits.
  818. */
  819. struct __packed gsi_11ad_rx_channel_scratch {
  820. uint32_t status_ring_hwtail_address_lsb;
  821. uint32_t status_ring_hwtail_address_msb;
  822. uint32_t data_buffers_base_address_lsb;
  823. uint32_t data_buffers_base_address_msb:8;
  824. uint32_t fixed_data_buffer_size_pow_2:16;
  825. uint32_t resv1:8;
  826. };
  827. /**
  828. * gsi_11ad_tx_channel_scratch - 11AD protocol SW config area of
  829. * TX channel scratch
  830. *
  831. * @status_ring_hwtail_address_lsb: Low 32 bits of status ring hwtail address.
  832. * @status_ring_hwhead_address_lsb: Low 32 bits of status ring hwhead address.
  833. * @status_ring_hwhead_hwtail_8_msb: higher 8 MSBs of the status ring
  834. * hwhead/hwtail addresses (should be identical).
  835. * @update_status_hwtail_mod_threshold: The threshold, in (32B) elements, for
  836. * updating descriptor ring 11ad HWTAIL pointer moderation.
  837. * @status_ring_num_elem: the number of elements in the status ring.
  838. * @resv1: reserved bits.
  839. * @fixed_data_buffer_size_pow_2: the fixed buffer size power of 2 (> MTU).
  840. * @resv2: reserved bits.
  841. */
  842. struct __packed gsi_11ad_tx_channel_scratch {
  843. uint32_t status_ring_hwtail_address_lsb;
  844. uint32_t status_ring_hwhead_address_lsb;
  845. uint32_t status_ring_hwhead_hwtail_8_msb:8;
  846. uint32_t update_status_hwtail_mod_threshold:8;
  847. uint32_t status_ring_num_elem:16;
  848. uint32_t resv1:8;
  849. uint32_t fixed_data_buffer_size_pow_2:16;
  850. uint32_t resv2:8;
  851. };
  852. /**
  853. * gsi_wdi3_channel_scratch - WDI protocol 3 SW config area of
  854. * channel scratch
  855. *
  856. * @wifi_rp_address_low: Low 32 bits of Transfer ring Read Index address.
  857. * @wifi_rp_address_high: High 32 bits of Transfer ring Read Index address.
  858. * @update_rp_moderation_threshold: Threshold N for Transfer ring Read Index
  859. * N is the number of packets that IPA will
  860. * process before the Wifi transfer ring RI will
  861. * be updated.
  862. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field
  863. * for MCS. Write for SW.
  864. * @reserved1: reserved bits.
  865. * @endp_metadata_reg_offset: Rx only, the offset of
  866. * IPA_ENDP_INIT_HDR_METADATA_n of the
  867. * corresponding endpoint in 4B words from IPA
  868. * base address.
  869. * @rx_pkt_offset: Rx only, since the Rx header length is not fixed,
  870. * the WLAN host will pass this information to IPA.
  871. * @reserved2: reserved bits.
  872. */
  873. struct __packed gsi_wdi3_channel_scratch {
  874. uint32_t wifi_rp_address_low;
  875. uint32_t wifi_rp_address_high;
  876. uint32_t update_rp_moderation_threshold : 5;
  877. uint32_t qmap_id : 8;
  878. uint32_t reserved1 : 3;
  879. uint32_t endp_metadata_reg_offset : 16;
  880. uint32_t rx_pkt_offset : 16;
  881. uint32_t reserved2 : 16;
  882. };
  883. /**
  884. * gsi_qdss_channel_scratch - QDSS SW config area of
  885. * channel scratch
  886. *
  887. * @bam_p_evt_dest_addr: equivalent to event_ring_doorbell_pa: the
  888. * physical address of the doorbell with which the IPA uC
  889. * updates the head pointer of the event ring.
  890. * QDSS should send the BAM_P_EVNT_REG address in this var.
  891. * Configured with the GSI Doorbell Address.
  892. * GSI sends Update RP by doing a write to this address.
  893. * @data_fifo_base_addr: Base address of the data FIFO used by BAM
  894. * @data_fifo_size: Size of the data FIFO
  895. * @bam_p_evt_threshold: Threshold level of how many bytes consumed
  896. * @override_eot: if override EOT==1, it doesn't check the EOT bit in
  897. * the descriptor
  898. */
  899. struct __packed gsi_qdss_channel_scratch {
  900. uint32_t bam_p_evt_dest_addr;
  901. uint32_t data_fifo_base_addr;
  902. uint32_t data_fifo_size : 16;
  903. uint32_t bam_p_evt_threshold : 16;
  904. uint32_t reserved1 : 2;
  905. uint32_t override_eot : 1;
  906. uint32_t reserved2 : 29;
  907. };
  908. /**
  909. * gsi_wdi3_channel_scratch2 - WDI3 protocol SW config area of
  910. * channel scratch2
  911. *
  912. * @update_rp_moderation_threshold: Threshold N for Transfer ring Read Index
  913. * N is the number of packets that IPA will
  914. * process before the Wifi transfer ring RI will
  915. * be updated.
  916. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only
  917. * field for MCS. Write for SW.
  918. * @resv: reserved bits.
  919. * @endp_metadata_reg_offset: Rx only, the offset of
  920. * IPA_ENDP_INIT_HDR_METADATA_n of the
  921. * corresponding endpoint in 4B words from IPA
  922. * base address.
  923. */
  924. struct __packed gsi_wdi3_channel_scratch2 {
  925. uint32_t update_rp_moderation_threshold : 5;
  926. uint32_t qmap_id : 8;
  927. uint32_t reserved1 : 3;
  928. uint32_t endp_metadata_reg_offset : 16;
  929. };
  930. /**
  931. * gsi_wdi3_channel_scratch2_reg - channel scratch2 SW config area
  932. *
  933. */
  934. union __packed gsi_wdi3_channel_scratch2_reg {
  935. struct __packed gsi_wdi3_channel_scratch2 wdi;
  936. struct __packed {
  937. uint32_t word1;
  938. } data;
  939. };
  940. /**
  941. * gsi_rtk_channel_scratch - Realtek SW config area of
  942. * channel scratch
  943. *
  944. * @rtk_bar_low: Realtek bar address LSB
  945. * @rtk_bar_high: Realtek bar address MSB
  946. * @queue_number: dma channel number in rtk
  947. * @fix_buff_size: buff size in KB
  948. * @rtk_buff_addr_high: high bits of the buffer address the TRE points to
  949. * @rtk_buff_addr_low: low 32 bits of the buffer address the TRE points to
  950. * in the descriptor
  951. */
  952. struct __packed gsi_rtk_channel_scratch {
  953. uint32_t rtk_bar_low;
  954. uint32_t rtk_bar_high : 9;
  955. uint32_t queue_number : 5;
  956. uint32_t fix_buff_size : 4;
  957. uint32_t reserved1 : 6;
  958. uint32_t rtk_buff_addr_high : 8;
  959. uint32_t rtk_buff_addr_low;
  960. uint32_t reserved2;
  961. };
  962. /**
  963. * gsi_aqc_channel_scratch - AQC SW config area of
  964. * channel scratch
  965. *
  966. * @buff_addr_lsb: AQC buffer address LSB (RX)
  967. * @buff_addr_msb: AQC buffer address MSB (RX)
  968. * @fix_buff_size: buff size in log2
  969. * @head_ptr_lsb: head pointer address LSB (RX)
  970. * @head_ptr_msb: head pointer address MSB (RX)
  971. */
  972. struct __packed gsi_aqc_channel_scratch {
  973. uint32_t buff_addr_lsb;
  974. uint32_t buff_addr_msb : 8;
  975. uint32_t reserved1 : 8;
  976. unsigned fix_buff_size : 16;
  977. uint32_t head_ptr_lsb;
  978. uint32_t head_ptr_msb : 9;
  979. uint32_t reserved2 : 23;
  980. };
  981. /**
  982. * gsi_channel_scratch - channel scratch SW config area
  983. *
  984. */
  985. union __packed gsi_channel_scratch {
  986. struct __packed gsi_gpi_channel_scratch gpi;
  987. struct __packed gsi_mhi_channel_scratch mhi;
  988. struct __packed gsi_mhi_channel_scratch_v2 mhi_v2;
  989. struct __packed gsi_xdci_channel_scratch xdci;
  990. struct __packed gsi_wdi_channel_scratch wdi;
  991. struct __packed gsi_11ad_rx_channel_scratch rx_11ad;
  992. struct __packed gsi_11ad_tx_channel_scratch tx_11ad;
  993. struct __packed gsi_wdi3_channel_scratch wdi3;
  994. struct __packed gsi_mhip_channel_scratch mhip;
  995. struct __packed gsi_wdi2_channel_scratch_new wdi2_new;
  996. struct __packed gsi_aqc_channel_scratch aqc;
  997. struct __packed gsi_rtk_channel_scratch rtk;
  998. struct __packed gsi_qdss_channel_scratch qdss;
  999. struct __packed {
  1000. uint32_t word1;
  1001. uint32_t word2;
  1002. uint32_t word3;
  1003. uint32_t word4;
  1004. } data;
  1005. };
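/*
 * Illustrative sketch (not part of the driver): the protocol-specific views
 * and the raw "data" words of gsi_channel_scratch overlay the same four
 * 32-bit words, so code inside the GSI driver (where GSIDBG is usable) can
 * fill a typed view and then read back the raw words, e.g. for logging.
 *
 *	union gsi_channel_scratch scr;
 *
 *	memset(&scr, 0, sizeof(scr));
 *	scr.gpi.max_outstanding_tre = 256;
 *	scr.gpi.outstanding_threshold = 32;
 *	GSIDBG("scratch words 0x%x 0x%x 0x%x 0x%x\n",
 *		scr.data.word1, scr.data.word2, scr.data.word3, scr.data.word4);
 */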
  1006. /**
  1007. * gsi_wdi_channel_scratch3 - WDI protocol SW config area of
  1008. * channel scratch3
  1009. */
  1010. struct __packed gsi_wdi_channel_scratch3 {
  1011. uint32_t endp_metadatareg_offset:16;
  1012. uint32_t qmap_id:16;
  1013. };
  1014. /**
  1015. * gsi_wdi_channel_scratch3_reg - channel scratch3 SW config area
  1016. *
  1017. */
  1018. union __packed gsi_wdi_channel_scratch3_reg {
  1019. struct __packed gsi_wdi_channel_scratch3 wdi;
  1020. struct __packed {
  1021. uint32_t word1;
  1022. } data;
  1023. };
  1024. /**
  1025. * gsi_wdi2_channel_scratch2 - WDI protocol SW config area of
  1026. * channel scratch2
  1027. */
  1028. struct __packed gsi_wdi2_channel_scratch2 {
  1029. uint32_t update_ri_moderation_threshold:5;
  1030. uint32_t qmap_id:8;
  1031. uint32_t resv1:3;
  1032. uint32_t endp_metadatareg_offset:16;
  1033. };
  1034. /**
  1035. * gsi_wdi_channel_scratch2_reg - channel scratch2 SW config area
  1036. *
  1037. */
  1038. union __packed gsi_wdi2_channel_scratch2_reg {
  1039. struct __packed gsi_wdi2_channel_scratch2 wdi;
  1040. struct __packed {
  1041. uint32_t word1;
  1042. } data;
  1043. };
  1044. /**
  1045. * gsi_mhi_evt_scratch - MHI protocol SW config area of
  1046. * event scratch
  1047. */
  1048. struct __packed gsi_mhi_evt_scratch {
  1049. uint32_t resvd1;
  1050. uint32_t resvd2;
  1051. };
  1052. /**
  1053. * gsi_mhip_evt_scratch - MHI PRIME protocol SW config area of
  1054. * event scratch
  1055. */
  1056. struct __packed gsi_mhip_evt_scratch {
  1057. uint32_t rp_mod_threshold:8;
  1058. uint32_t rp_mod_timer:4;
  1059. uint32_t rp_mod_counter:8;
  1060. uint32_t rp_mod_timer_id:4;
  1061. uint32_t rp_mod_timer_running:1;
  1062. uint32_t resvd1:7;
  1063. uint32_t fixed_buffer_sz:16;
  1064. uint32_t resvd2:16;
  1065. };
  1066. /**
  1067. * gsi_xdci_evt_scratch - xDCI protocol SW config area of
  1068. * event scratch
  1069. *
  1070. */
  1071. struct __packed gsi_xdci_evt_scratch {
  1072. uint32_t gevntcount_low_addr;
  1073. uint32_t gevntcount_hi_addr:8;
  1074. uint32_t resvd1:24;
  1075. };
  1076. /**
  1077. * gsi_wdi_evt_scratch - WDI protocol SW config area of
  1078. * event scratch
  1079. *
  1080. */
  1081. struct __packed gsi_wdi_evt_scratch {
  1082. uint32_t update_ri_moderation_config:8;
  1083. uint32_t resvd1:8;
  1084. uint32_t update_ri_mod_timer_running:1;
  1085. uint32_t evt_comp_count:14;
  1086. uint32_t resvd2:1;
  1087. uint32_t last_update_ri:16;
  1088. uint32_t resvd3:16;
  1089. };
  1090. /**
  1091. * gsi_11ad_evt_scratch - 11AD protocol SW config area of
  1092. * event scratch
  1093. *
  1094. */
  1095. struct __packed gsi_11ad_evt_scratch {
  1096. uint32_t update_status_hwtail_mod_threshold : 8;
  1097. uint32_t resvd1:8;
  1098. uint32_t resvd2:16;
  1099. uint32_t resvd3;
  1100. };
  1101. /**
  1102. * gsi_wdi3_evt_scratch - wdi3 protocol SW config area of
  1103. * event scratch
  1104. * @update_rp_moderation_config: Threshold N for Transfer ring Read Index
  1105. * N is the number of packets that IPA will
  1106. * process before the Wifi transfer ring RI will
  1107. * be updated.
  1108. * @reserved1: reserved bits.
  1109. * @reserved2: reserved bits.
  1110. */
  1111. struct __packed gsi_wdi3_evt_scratch {
  1112. uint32_t update_rp_moderation_config : 8;
  1113. uint32_t reserved1 : 24;
  1114. uint32_t reserved2;
  1115. };
  1116. /**
  1117. * gsi_rtk_evt_scratch - realtek protocol SW config area of
  1118. * event scratch
  1119. * @reserved1: reserved bits.
  1120. * @reserved2: reserved bits.
  1121. */
  1122. struct __packed gsi_rtk_evt_scratch {
  1123. uint32_t reserved1;
  1124. uint32_t reserved2;
  1125. };
  1126. /**
  1127. * gsi_aqc_evt_scratch - AQC protocol SW config area of
  1128. * event scratch
  1129. * @head_ptr_wrb_mod_threshold: head pointer write-back moderation threshold
  1130. * @reserved1-3: reserved bits.
  1131. */
  1132. struct __packed gsi_aqc_evt_scratch {
  1133. uint8_t head_ptr_wrb_mod_threshold;
  1134. uint8_t reserved1;
  1135. uint16_t reserved2;
  1136. uint32_t reserved3;
  1137. };
  1138. /**
  1139. * gsi_evt_scratch - event scratch SW config area
  1140. *
  1141. */
  1142. union __packed gsi_evt_scratch {
  1143. struct __packed gsi_mhi_evt_scratch mhi;
  1144. struct __packed gsi_xdci_evt_scratch xdci;
  1145. struct __packed gsi_wdi_evt_scratch wdi;
  1146. struct __packed gsi_11ad_evt_scratch w11ad;
  1147. struct __packed gsi_wdi3_evt_scratch wdi3;
  1148. struct __packed gsi_mhip_evt_scratch mhip;
  1149. struct __packed gsi_aqc_evt_scratch aqc;
  1150. struct __packed gsi_rtk_evt_scratch rtk;
  1151. struct __packed {
  1152. uint32_t word1;
  1153. uint32_t word2;
  1154. } data;
  1155. };
  1156. /**
  1157. * gsi_device_scratch - EE scratch config parameters
  1158. *
  1159. * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid?
  1160. * @mhi_base_chan_idx: base index of IPA MHI channel indexes.
  1161. * IPA MHI channel index = GSI channel ID +
  1162. * MHI base channel index
  1163. * @max_usb_pkt_size_valid: is max_usb_pkt_size valid?
  1164. * @max_usb_pkt_size: max USB packet size in bytes (valid values are
  1165. * 64, 512 and 1024)
  1166. */
  1167. struct gsi_device_scratch {
  1168. bool mhi_base_chan_idx_valid;
  1169. uint8_t mhi_base_chan_idx;
  1170. bool max_usb_pkt_size_valid;
  1171. uint16_t max_usb_pkt_size;
  1172. };
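/*
 * Illustrative sketch (not part of the driver): an EE scratch configuration
 * that publishes both optional fields. The values shown are arbitrary
 * examples; max_usb_pkt_size must be one of 64, 512 or 1024 per the
 * description above.
 *
 *	struct gsi_device_scratch dev_scr;
 *
 *	memset(&dev_scr, 0, sizeof(dev_scr));
 *	dev_scr.mhi_base_chan_idx_valid = true;
 *	dev_scr.mhi_base_chan_idx = 4;
 *	dev_scr.max_usb_pkt_size_valid = true;
 *	dev_scr.max_usb_pkt_size = 512;
 */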
  1173. /**
  1174. * gsi_chan_info - information about channel occupancy
  1175. *
  1176. * @wp: channel write pointer (physical address)
  1177. * @rp: channel read pointer (physical address)
  1178. * @evt_valid: is evt* info valid?
  1179. * @evt_wp: event ring write pointer (physical address)
  1180. * @evt_rp: event ring read pointer (physical address)
  1181. */
  1182. struct gsi_chan_info {
  1183. uint64_t wp;
  1184. uint64_t rp;
  1185. bool evt_valid;
  1186. uint64_t evt_wp;
  1187. uint64_t evt_rp;
  1188. };
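/*
 * Illustrative sketch (not part of the driver): deriving the number of bytes
 * queued on a channel from the snapshot in gsi_chan_info, assuming wp and rp
 * are byte addresses within one ring of ring_len bytes, so the subtraction
 * has to account for wrap-around. ring_len is assumed to be known to the
 * caller from the channel properties.
 *
 *	static u64 my_chan_bytes_pending(struct gsi_chan_info *info, u32 ring_len)
 *	{
 *		if (info->wp >= info->rp)
 *			return info->wp - info->rp;
 *		return ring_len - (info->rp - info->wp);
 *	}
 */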
  1189. enum gsi_evt_ring_state {
  1190. GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
  1191. GSI_EVT_RING_STATE_ALLOCATED = 0x1,
  1192. GSI_EVT_RING_STATE_ERROR = 0xf
  1193. };
  1194. enum gsi_chan_state {
  1195. GSI_CHAN_STATE_NOT_ALLOCATED = 0x0,
  1196. GSI_CHAN_STATE_ALLOCATED = 0x1,
  1197. GSI_CHAN_STATE_STARTED = 0x2,
  1198. GSI_CHAN_STATE_STOPPED = 0x3,
  1199. GSI_CHAN_STATE_STOP_IN_PROC = 0x4,
  1200. GSI_CHAN_STATE_FLOW_CONTROL = 0x5,
  1201. GSI_CHAN_STATE_ERROR = 0xf
  1202. };
  1203. struct gsi_ring_ctx {
  1204. spinlock_t slock;
  1205. unsigned long base_va;
  1206. uint64_t base;
  1207. uint64_t wp;
  1208. uint64_t rp;
  1209. uint64_t wp_local;
  1210. uint64_t rp_local;
  1211. uint32_t len;
  1212. uint8_t elem_sz;
  1213. uint16_t max_num_elem;
  1214. uint64_t end;
  1215. };
  1216. struct gsi_chan_dp_stats {
  1217. unsigned long ch_below_lo;
  1218. unsigned long ch_below_hi;
  1219. unsigned long ch_above_hi;
  1220. unsigned long empty_time;
  1221. unsigned long last_timestamp;
  1222. };
  1223. struct gsi_chan_stats {
  1224. unsigned long queued;
  1225. unsigned long completed;
  1226. unsigned long callback_to_poll;
  1227. unsigned long poll_to_callback;
  1228. unsigned long poll_pending_irq;
  1229. unsigned long invalid_tre_error;
  1230. unsigned long poll_ok;
  1231. unsigned long poll_empty;
  1232. unsigned long userdata_in_use;
  1233. struct gsi_chan_dp_stats dp;
  1234. };
  1235. /**
  1236. * struct gsi_user_data - user_data element pointed by the TRE
  1237. * @valid: valid to be cleaned. if its true that means it is being used.
  1238. * false means its free to overwrite
  1239. * @p: pointer to the user data array element
  1240. */
  1241. struct gsi_user_data {
  1242. bool valid;
  1243. void *p;
  1244. };
  1245. struct gsi_chan_ctx {
  1246. struct gsi_chan_props props;
  1247. enum gsi_chan_state state;
  1248. struct gsi_ring_ctx ring;
  1249. struct gsi_user_data *user_data;
  1250. struct gsi_evt_ctx *evtr;
  1251. struct mutex mlock;
  1252. struct completion compl;
  1253. bool allocated;
  1254. atomic_t poll_mode;
  1255. union __packed gsi_channel_scratch scratch;
  1256. struct gsi_chan_stats stats;
  1257. bool enable_dp_stats;
  1258. bool print_dp_stats;
  1259. };
struct gsi_evt_stats {
	unsigned long completed;
};
struct gsi_evt_ctx {
	struct gsi_evt_ring_props props;
	enum gsi_evt_ring_state state;
	uint8_t id;
	struct gsi_ring_ctx ring;
	struct mutex mlock;
	struct completion compl;
	struct gsi_chan_ctx *chan[MAX_CHANNELS_SHARING_EVENT_RING];
	uint8_t num_of_chan_allocated;
	atomic_t chan_ref_cnt;
	union __packed gsi_evt_scratch scratch;
	struct gsi_evt_stats stats;
};
struct gsi_ee_scratch {
	union __packed {
		struct {
			uint32_t inter_ee_cmd_return_code:3;
			uint32_t resvd1:2;
			uint32_t generic_ee_cmd_return_code:3;
			uint32_t resvd2:2;
			uint32_t generic_ee_cmd_return_val:3;
			uint32_t resvd4:2;
			uint32_t max_usb_pkt_size:1;
			uint32_t resvd3:8;
			uint32_t mhi_base_chan_idx:8;
		} s;
		uint32_t val;
	} word0;
	uint32_t word1;
};
struct ch_debug_stats {
	unsigned long ch_allocate;
	unsigned long ch_start;
	unsigned long ch_stop;
	unsigned long ch_reset;
	unsigned long ch_de_alloc;
	unsigned long ch_db_stop;
	unsigned long cmd_completed;
};
struct gsi_generic_ee_cmd_debug_stats {
	unsigned long halt_channel;
	unsigned long flow_ctrl_channel;
};
struct gsi_coal_chan_info {
	uint8_t ch_id;
	uint8_t evchid;
};
struct gsi_log_ts {
	u64 timestamp;
	u64 qtimer;
	u32 interrupt_type;
};
struct gsi_ctx {
	void __iomem *base;
	struct device *dev;
	struct gsi_per_props per;
	bool per_registered;
	struct gsi_chan_ctx chan[GSI_CHAN_MAX];
	struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
	struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
	struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg;
	struct mutex mlock;
	spinlock_t slock;
	unsigned long evt_bmap;
	bool enabled;
	atomic_t num_chan;
	atomic_t num_evt_ring;
	struct gsi_ee_scratch scratch;
	int num_ch_dp_stats;
	struct workqueue_struct *dp_stat_wq;
	u32 max_ch;
	u32 max_ev;
	struct completion gen_ee_cmd_compl;
	void *ipc_logbuf;
	void *ipc_logbuf_low;
	struct gsi_coal_chan_info coal_info;
	/*
	 * The following are used only on emulation systems.
	 */
	void __iomem *intcntrlr_base;
	u32 intcntrlr_mem_size;
	irq_handler_t intcntrlr_gsi_isr;
	irq_handler_t intcntrlr_client_isr;
	struct gsi_log_ts gsi_isr_cache[GSI_ISR_CACHE_MAX];
	int gsi_isr_cache_index;
	atomic_t num_unclock_irq;
};
enum gsi_re_type {
	GSI_RE_XFER = 0x2,
	GSI_RE_IMMD_CMD = 0x3,
	GSI_RE_NOP = 0x4,
	GSI_RE_COAL = 0x8,
};
struct __packed gsi_tre {
	uint64_t buffer_ptr;
	uint16_t buf_len;
	uint16_t resvd1;
	uint16_t chain:1;
	uint16_t resvd4:7;
	uint16_t ieob:1;
	uint16_t ieot:1;
	uint16_t bei:1;
	uint16_t resvd3:5;
	uint8_t re_type;
	uint8_t resvd2;
};
struct __packed gsi_gci_tre {
	uint64_t buffer_ptr:41;
	uint64_t resvd1:7;
	uint64_t buf_len:16;
	uint64_t cookie:40;
	uint64_t resvd2:8;
	uint64_t re_type:8;
	uint64_t resvd3:8;
};
#define GSI_XFER_COMPL_TYPE_GCI 0x28
struct __packed gsi_xfer_compl_evt {
	union {
		uint64_t xfer_ptr;
		struct {
			uint64_t cookie:40;
			uint64_t resvd1:24;
		};
	};
	uint16_t len;
	uint8_t veid;
	uint8_t code; /* see gsi_chan_evt */
	uint16_t resvd;
	uint8_t type;
	uint8_t chid;
};
enum gsi_err_type {
	GSI_ERR_TYPE_GLOB = 0x1,
	GSI_ERR_TYPE_CHAN = 0x2,
	GSI_ERR_TYPE_EVT = 0x3,
};
enum gsi_err_code {
	GSI_INVALID_TRE_ERR = 0x1,
	GSI_OUT_OF_BUFFERS_ERR = 0x2,
	GSI_OUT_OF_RESOURCES_ERR = 0x3,
	GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
	GSI_EVT_RING_EMPTY_ERR = 0x5,
	GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
	GSI_HWO_1_ERR = 0x8
};
struct __packed gsi_log_err {
	uint32_t arg3:4;
	uint32_t arg2:4;
	uint32_t arg1:4;
	uint32_t code:4;
	uint32_t resvd:3;
	uint32_t virt_idx:5;
	uint32_t err_type:4;
	uint32_t ee:4;
};
enum gsi_ch_cmd_opcode {
	GSI_CH_ALLOCATE = 0x0,
	GSI_CH_START = 0x1,
	GSI_CH_STOP = 0x2,
	GSI_CH_RESET = 0x9,
	GSI_CH_DE_ALLOC = 0xa,
	GSI_CH_DB_STOP = 0xb,
};
enum gsi_evt_ch_cmd_opcode {
	GSI_EVT_ALLOCATE = 0x0,
	GSI_EVT_RESET = 0x9,
	GSI_EVT_DE_ALLOC = 0xa,
};
enum gsi_generic_ee_cmd_opcode {
	GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
	GSI_GEN_EE_CMD_ALLOC_CHANNEL = 0x2,
	GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL = 0x3,
	GSI_GEN_EE_CMD_DISABLE_FLOW_CHANNEL = 0x4,
	GSI_GEN_EE_CMD_QUERY_FLOW_CHANNEL = 0x5,
};
enum gsi_generic_ee_cmd_return_code {
	GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1,
	GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2,
	GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3,
	GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4,
	GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5,
	GSI_GEN_EE_CMD_RETURN_CODE_RETRY = 0x6,
	GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES = 0x7,
};
/**
 * struct gsi_hw_profiling_data - GSI profiling data
 * @bp_cnt: Back Pressure occurrences count
 * @bp_and_pending_cnt: Back Pressure with pending back pressure count
 * @mcs_busy_cnt: Cycle count for MCS busy
 * @mcs_idle_cnt: Cycle count for MCS idle
 */
struct gsi_hw_profiling_data {
	u64 bp_cnt;
	u64 bp_and_pending_cnt;
	u64 mcs_busy_cnt;
	u64 mcs_idle_cnt;
};
/**
 * struct gsi_fw_version - GSI FW version data
 * @hw: HW version
 * @flavor: Flavor identifier
 * @fw: FW version
 */
struct gsi_fw_version {
	u32 hw;
	u32 flavor;
	u32 fw;
};
enum gsi_generic_ee_cmd_query_retun_val {
	GSI_GEN_EE_CMD_RETURN_VAL_FLOW_CONTROL_PRIMARY = 0,
	GSI_GEN_EE_CMD_RETURN_VAL_FLOW_CONTROL_SECONDARY = 1,
	GSI_GEN_EE_CMD_RETURN_VAL_FLOW_CONTROL_PENDING = 2,
};
extern struct gsi_ctx *gsi_ctx;
/**
 * gsi_xfer_elem - Metadata about a single transfer
 *
 * @addr: physical address of buffer
 * @len: size of buffer for GSI_XFER_ELEM_DATA:
 *	for outbound transfers this is the number of bytes to transfer;
 *	for inbound transfers, this is the maximum number of bytes the host
 *	expects from the device in this transfer
 *
 *	immediate command opcode for GSI_XFER_ELEM_IMME_CMD
 * @flags: transfer flags, OR of all the applicable flags
 *
 *	GSI_XFER_FLAG_BEI: Block event interrupt
 *	1: Event generated by this ring element must not assert an
 *	interrupt to the host
 *	0: Event generated by this ring element must assert an interrupt
 *	to the host
 *
 *	GSI_XFER_FLAG_EOT: Interrupt on end of transfer
 *	1: If an EOT condition is encountered when processing this ring
 *	element, an event is generated by the device with its completion
 *	code set to EOT.
 *	0: If an EOT condition is encountered for this ring element, a
 *	completion event is not generated by the device, unless IEOB is 1
 *
 *	GSI_XFER_FLAG_EOB: Interrupt on end of block
 *	1: Device notifies host after processing this ring element by
 *	sending a completion event
 *	0: Completion event is not required after processing this ring
 *	element
 *
 *	GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring elements
 *	in a TD
 *
 * @type: transfer type
 *
 *	GSI_XFER_ELEM_DATA: for all data transfers
 *	GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
 *	GSI_XFER_ELEM_NOP: for event generation only
 *
 * @xfer_user_data: cookie used in xfer_cb
 *
 */
struct gsi_xfer_elem {
	uint64_t addr;
	uint16_t len;
	uint16_t flags;
	enum gsi_xfer_elem_type type;
	void *xfer_user_data;
};
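/*
 * Illustrative sketch (not part of the API): populating one outbound data
 * transfer element and queuing it with gsi_queue_xfer(), declared below.
 * dma_addr, skb and chan_hdl are assumed to come from the client; the flag
 * and type names are the GSI_XFER_* values documented above.
 *
 *	struct gsi_xfer_elem xfer = { 0 };
 *
 *	xfer.addr = dma_addr;
 *	xfer.len = skb->len;
 *	xfer.type = GSI_XFER_ELEM_DATA;
 *	xfer.flags = GSI_XFER_FLAG_EOT;
 *	xfer.xfer_user_data = skb;	// returned via xfer_cb/notify
 *	ret = gsi_queue_xfer(chan_hdl, 1, &xfer, true);
 */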
/**
 * gsi_alloc_evt_ring - Peripheral should call this function to
 * allocate an event ring
 *
 * @props: Event ring properties
 * @dev_hdl: Client handle previously obtained from
 *	gsi_register_device
 * @evt_ring_hdl: Handle populated by GSI, opaque to client
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
	unsigned long *evt_ring_hdl);
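/*
 * Illustrative sketch (not part of the API): allocating an event ring once
 * gsi_register_device() has returned dev_hdl. Which props fields to fill is
 * an assumption here; the full set is in struct gsi_evt_ring_props earlier
 * in this header, and the status-code name follows the gsi_status values
 * defined there as well.
 *
 *	struct gsi_evt_ring_props evt_props = { 0 };
 *	unsigned long evt_hdl;
 *	int ret;
 *
 *	// fill evt_props (interrupt type, ring base/size, callbacks, ...)
 *	ret = gsi_alloc_evt_ring(&evt_props, dev_hdl, &evt_hdl);
 *	if (ret != GSI_STATUS_SUCCESS)
 *		return ret;
 */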
/**
 * gsi_dealloc_evt_ring - Peripheral should call this function to
 * de-allocate an event ring. There should not exist any active
 * channels using this event ring
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *	gsi_alloc_evt_ring
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl);
/**
 * gsi_alloc_channel - Peripheral should call this function to
 * allocate a channel
 *
 * @props: Channel properties
 * @dev_hdl: Client handle previously obtained from
 *	gsi_register_device
 * @chan_hdl: Handle populated by GSI, opaque to client
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
	unsigned long *chan_hdl);
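/*
 * Illustrative sketch (not part of the API): allocating and starting a
 * channel bound to a previously allocated event ring. Which props fields to
 * fill is an assumption here; see struct gsi_chan_props earlier in this
 * header for the full list.
 *
 *	struct gsi_chan_props ch_props = { 0 };
 *	unsigned long chan_hdl;
 *	int ret;
 *
 *	// fill ch_props (protocol, direction, ring base/size, evt_ring_hdl,
 *	// xfer_cb/err_cb, ...)
 *	ret = gsi_alloc_channel(&ch_props, dev_hdl, &chan_hdl);
 *	if (ret)
 *		return ret;
 *	ret = gsi_start_channel(chan_hdl);
 */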
/**
 * gsi_start_channel - Peripheral should call this function to
 * start a channel, i.e. put it into the running state
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_start_channel(unsigned long chan_hdl);
/**
 * gsi_reset_channel - Peripheral should call this function to
 * reset a channel to recover from an error state
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_reset_channel(unsigned long chan_hdl);
/**
 * gsi_dealloc_channel - Peripheral should call this function to
 * de-allocate a channel
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_dealloc_channel(unsigned long chan_hdl);
/**
 * gsi_poll_channel - Peripheral should call this function to query for
 * completed transfer descriptors.
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @notify: Information about the completed transfer, if any
 *
 * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
 *	completed)
 */
int gsi_poll_channel(unsigned long chan_hdl,
	struct gsi_chan_xfer_notify *notify);
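/*
 * Illustrative sketch (not part of the API): draining completions while in
 * polling mode. struct gsi_chan_xfer_notify is defined earlier in this
 * header; the budget handling and the consume() helper are assumptions for
 * the example.
 *
 *	struct gsi_chan_xfer_notify notify;
 *	int ret;
 *
 *	while (budget--) {
 *		ret = gsi_poll_channel(chan_hdl, &notify);
 *		if (ret == GSI_STATUS_POLL_EMPTY)
 *			break;		// nothing more completed
 *		if (ret != GSI_STATUS_SUCCESS)
 *			goto error;
 *		consume(notify.xfer_user_data);	// cookie set in gsi_xfer_elem
 *	}
 */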
/**
 * gsi_ring_evt_doorbell_polling_mode - Peripheral should call this function
 * to ring the event ring doorbell from polling (NAPI) context
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 *
 */
void gsi_ring_evt_doorbell_polling_mode(unsigned long chan_hdl);
/**
 * gsi_config_channel_mode - Peripheral should call this function
 * to configure the channel mode.
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @mode: Mode to move the channel into
 *
 * @Return gsi_status
 */
int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode);
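/*
 * Illustrative sketch (not part of the API): a NAPI-style switch between
 * callback and polling mode around a poll loop. The GSI_CHAN_MODE_*
 * enumerator names are assumed to be those of enum gsi_chan_mode defined
 * earlier in this header, and drain_completions() is a hypothetical client
 * helper (see the gsi_poll_channel example above).
 *
 *	// interrupt fired: stop completion interrupts and poll instead
 *	gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_POLL);
 *	drain_completions(chan_hdl);
 *	// ring is empty again: re-arm completion interrupts
 *	gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_CALLBACK);
 */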
/**
 * gsi_queue_xfer - Peripheral should call this function
 * to queue transfers on the given channel
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @num_xfers: Number of transfers in the array @xfer
 * @xfer: Array of num_xfers transfer descriptors
 * @ring_db: If true, tell HW about these queued xfers
 *	If false, do not notify HW at this time
 *
 * @Return gsi_status
 */
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
	struct gsi_xfer_elem *xfer, bool ring_db);
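/*
 * Illustrative sketch (not part of the API): queuing a small batch of
 * transfers without ringing the doorbell, then notifying HW in one shot via
 * gsi_start_xfer(), declared below. NR and nr are assumptions for the
 * example.
 *
 *	struct gsi_xfer_elem elems[NR];
 *	int ret;
 *
 *	// fill elems[0..nr-1] as in the gsi_xfer_elem example above
 *	ret = gsi_queue_xfer(chan_hdl, nr, elems, false);  // queue only
 *	if (!ret)
 *		ret = gsi_start_xfer(chan_hdl);  // now tell HW about them
 */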
void gsi_debugfs_init(void);
uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr);
void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used);
/**
 * gsi_register_device - Peripheral should call this function to
 * register itself with GSI before invoking any other APIs
 *
 * @props: Peripheral properties
 * @dev_hdl: Handle populated by GSI, opaque to client
 *
 * @Return -GSI_STATUS_AGAIN if request should be re-tried later
 *	other error codes for failure
 */
int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl);
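/*
 * Illustrative sketch (not part of the API): registering with GSI and
 * deferring when GSI asks to retry. Which props fields to fill and the
 * mapping of -GSI_STATUS_AGAIN to -EPROBE_DEFER are assumptions for the
 * example; see struct gsi_per_props earlier in this header.
 *
 *	struct gsi_per_props per_props = { 0 };
 *	unsigned long dev_hdl;
 *	int ret;
 *
 *	// fill per_props (ver, ee, irq, phys_addr, notify callback, ...)
 *	ret = gsi_register_device(&per_props, &dev_hdl);
 *	if (ret == -GSI_STATUS_AGAIN)
 *		return -EPROBE_DEFER;	// re-try later, per the doc above
 */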
/**
 * gsi_complete_clk_grant - Peripheral should call this function to
 * grant the clock resource requested by GSI previously that could not
 * be granted synchronously. GSI will release the clock resource using
 * the rel_clk_cb when appropriate
 *
 * @dev_hdl: Client handle previously obtained from
 *	gsi_register_device
 *
 * @Return gsi_status
 */
int gsi_complete_clk_grant(unsigned long dev_hdl);
/**
 * gsi_write_device_scratch - Peripheral should call this function to
 * write to the EE scratch area
 *
 * @dev_hdl: Client handle previously obtained from
 *	gsi_register_device
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_device_scratch(unsigned long dev_hdl,
	struct gsi_device_scratch *val);
/**
 * gsi_deregister_device - Peripheral should call this function to
 * de-register itself with GSI
 *
 * @dev_hdl: Client handle previously obtained from
 *	gsi_register_device
 * @force: When set to true, cleanup is performed even if there
 *	are in-use resources like channels, event rings, etc.
 *	This would be used after GSI reset to recover from some
 *	fatal error.
 *	When set to false, there must not exist any allocated
 *	channels and event rings.
 *
 * @Return gsi_status
 */
int gsi_deregister_device(unsigned long dev_hdl, bool force);
/**
 * gsi_write_evt_ring_scratch - Peripheral should call this function to
 * write to the scratch area of the event ring context
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *	gsi_alloc_evt_ring
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
	union __packed gsi_evt_scratch val);
/**
 * gsi_query_evt_ring_db_addr - Peripheral should call this function to
 * query the physical addresses of the event ring doorbell registers
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *	gsi_alloc_evt_ring
 * @db_addr_wp_lsb: Physical address of doorbell register where the 32
 *	LSBs of the doorbell value should be written
 * @db_addr_wp_msb: Physical address of doorbell register where the 32
 *	MSBs of the doorbell value should be written
 *
 * @Return gsi_status
 */
int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
	uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
/**
 * gsi_ring_evt_ring_db - Peripheral should call this function for
 * ringing the event ring doorbell with the given value
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *	gsi_alloc_evt_ring
 * @value: The value to be used for ringing the doorbell
 *
 * @Return gsi_status
 */
int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value);
/**
 * gsi_ring_ch_ring_db - Peripheral should call this function for
 * ringing the channel ring doorbell with the given value
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @value: The value to be used for ringing the doorbell
 *
 * @Return gsi_status
 */
int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value);
/**
 * gsi_reset_evt_ring - Peripheral should call this function to
 * reset an event ring to recover from an error state
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *	gsi_alloc_evt_ring
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_reset_evt_ring(unsigned long evt_ring_hdl);
/**
 * gsi_get_evt_ring_cfg - This function returns the current config
 * of the specified event ring
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *	gsi_alloc_evt_ring
 * @props: where to copy properties to
 * @scr: where to copy scratch info to
 *
 * @Return gsi_status
 */
int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
	struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
/**
 * gsi_set_evt_ring_cfg - This function applies the supplied config
 * to the specified event ring.
 *
 * The exclusive property of the event ring cannot be changed after
 * gsi_alloc_evt_ring
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *	gsi_alloc_evt_ring
 * @props: the properties to apply
 * @scr: the scratch info to apply
 *
 * @Return gsi_status
 */
int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
	struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
/**
 * gsi_write_channel_scratch - Peripheral should call this function to
 * write to the scratch area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_channel_scratch(unsigned long chan_hdl,
	union __packed gsi_channel_scratch val);
/**
 * gsi_write_channel_scratch3_reg - Peripheral should call this function to
 * write to the scratch3 reg area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
	union __packed gsi_wdi_channel_scratch3_reg val);
/**
 * gsi_write_channel_scratch2_reg - Peripheral should call this function to
 * write to the scratch2 reg area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
	union __packed gsi_wdi2_channel_scratch2_reg val);
/**
 * gsi_write_wdi3_channel_scratch2_reg - Peripheral should call this function
 * to write to the WDI3 scratch 2 register area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
	union __packed gsi_wdi3_channel_scratch2_reg val);
/**
 * gsi_read_channel_scratch - Peripheral should call this function to
 * read from the scratch area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @val: Read value
 *
 * @Return gsi_status
 */
int gsi_read_channel_scratch(unsigned long chan_hdl,
	union __packed gsi_channel_scratch *val);
/**
 * gsi_read_wdi3_channel_scratch2_reg - Peripheral should call this function
 * to read from the WDI3 scratch 2 register area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @val: Read value
 *
 * @Return gsi_status
 */
int gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
	union __packed gsi_wdi3_channel_scratch2_reg *val);
/*
 * gsi_pending_irq_type - Peripheral should call this function to
 * check if there is any pending irq
 *
 * This function can sleep
 *
 * @Return gsi_irq_type
 */
int gsi_pending_irq_type(void);
/**
 * gsi_update_mhi_channel_scratch - MHI Peripheral should call this
 * function to update the scratch area of the channel context. The update
 * is done read-modify-write, so non-SWI fields will not be affected
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @mscr: MHI Channel Scratch value
 *
 * @Return gsi_status
 */
int gsi_update_mhi_channel_scratch(unsigned long chan_hdl,
	struct __packed gsi_mhi_channel_scratch mscr);
/**
 * gsi_stop_channel - Peripheral should call this function to
 * stop a channel. Stop will happen on a packet boundary
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
 *	other error codes for failure
 */
int gsi_stop_channel(unsigned long chan_hdl);
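/*
 * Illustrative sketch (not part of the API): stopping a channel, retrying a
 * bounded number of times while GSI reports -GSI_STATUS_AGAIN. The retry
 * count and sleep interval are assumptions for the example.
 *
 *	int retries = 5;
 *	int ret;
 *
 *	do {
 *		ret = gsi_stop_channel(chan_hdl);
 *		if (ret != -GSI_STATUS_AGAIN)
 *			break;
 *		usleep_range(1000, 2000);
 *	} while (--retries);
 */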
/**
 * gsi_stop_db_channel - Peripheral should call this function to
 * stop a channel when all transfer elements till the doorbell
 * have been processed
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
 *	other error codes for failure
 */
int gsi_stop_db_channel(unsigned long chan_hdl);
/**
 * gsi_query_channel_db_addr - Peripheral should call this function to
 * query the physical addresses of the channel doorbell registers
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @db_addr_wp_lsb: Physical address of doorbell register where the 32
 *	LSBs of the doorbell value should be written
 * @db_addr_wp_msb: Physical address of doorbell register where the 32
 *	MSBs of the doorbell value should be written
 *
 * @Return gsi_status
 */
int gsi_query_channel_db_addr(unsigned long chan_hdl,
	uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
/**
 * gsi_query_channel_info - Peripheral can call this function to query the
 * channel and associated event ring (if any) status.
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @info: Where to read the values into
 *
 * @Return gsi_status
 */
int gsi_query_channel_info(unsigned long chan_hdl,
	struct gsi_chan_info *info);
/**
 * gsi_is_channel_empty - Peripheral can call this function to query if
 * the channel is empty. This is only applicable to GPI. "Empty" means
 * GSI has consumed all descriptors for a TO_GSI channel and SW has
 * processed all completed descriptors for a FROM_GSI channel.
 *
 * @chan_hdl: Client handle previously obtained from gsi_alloc_channel
 * @is_empty: set by GSI based on channel emptiness
 *
 * @Return gsi_status
 */
int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);
/**
 * gsi_is_event_pending - Returns true if there is at least one event in the
 * provided event ring which wasn't processed.
 *
 * @chan_hdl: Client handle previously obtained from gsi_alloc_channel
 *
 * @Return true if an event is pending, else false
 */
bool gsi_is_event_pending(unsigned long chan_hdl);
/**
 * gsi_get_channel_cfg - This function returns the current config
 * of the specified channel
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @props: where to copy properties to
 * @scr: where to copy scratch info to
 *
 * @Return gsi_status
 */
int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
	union gsi_channel_scratch *scr);
/**
 * gsi_set_channel_cfg - This function applies the supplied config
 * to the specified channel
 *
 * ch_id and evt_ring_hdl of the channel cannot be changed after
 * gsi_alloc_channel
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @props: the properties to apply
 * @scr: the scratch info to apply
 *
 * @Return gsi_status
 */
int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
	union gsi_channel_scratch *scr);
/**
 * gsi_poll_n_channel - Peripheral should call this function to query for
 * completed transfer descriptors.
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 * @notify: Information about the completed transfers, if any
 * @expected_num: Number of descriptors to poll each time.
 * @actual_num: Actual number of descriptors polled successfully.
 *
 * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
 *	completed)
 */
int gsi_poll_n_channel(unsigned long chan_hdl,
	struct gsi_chan_xfer_notify *notify,
	int expected_num, int *actual_num);
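/*
 * Illustrative sketch (not part of the API): batched polling, e.g. from a
 * NAPI poll handler. The notify[] sizing and the NAPI_WEIGHT value are
 * assumptions for the example.
 *
 *	struct gsi_chan_xfer_notify notify[NAPI_WEIGHT];
 *	int polled = 0;
 *	int ret;
 *
 *	ret = gsi_poll_n_channel(chan_hdl, notify, NAPI_WEIGHT, &polled);
 *	if (ret == GSI_STATUS_POLL_EMPTY)
 *		return 0;		// nothing completed
 *	// process notify[0..polled-1]
 */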
/**
 * gsi_start_xfer - Peripheral should call this function to
 * inform HW about queued xfers
 *
 * @chan_hdl: Client handle previously obtained from
 *	gsi_alloc_channel
 *
 * @Return gsi_status
 */
int gsi_start_xfer(unsigned long chan_hdl);
/**
 * gsi_configure_regs - Peripheral should call this function
 * to configure the GSI registers before/after the FW is
 * loaded but before it is enabled.
 *
 * @per_base_addr: Base address of the peripheral using GSI
 * @ver: GSI core version
 *
 * @Return gsi_status
 */
int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver);
/**
 * gsi_enable_fw - Peripheral should call this function
 * to enable the GSI FW after the FW has been loaded to the SRAM.
 *
 * @gsi_base_addr: Base address of GSI register space
 * @gsi_size: Mapping size of the GSI register space
 * @ver: GSI core version
 * @Return gsi_status
 */
int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver);
/**
 * gsi_get_inst_ram_offset_and_size - Peripheral should call this function
 * to get the instruction RAM base address offset and size. Peripheral
 * typically uses this info to load the GSI FW into the IRAM.
 *
 * @base_offset: [OUT] IRAM base offset address
 * @size: [OUT] IRAM size
 * @ver: GSI core version
 * @Return none
 */
void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
	unsigned long *size, enum gsi_ver ver);
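/*
 * Illustrative sketch (not part of the API): a possible FW bring-up order
 * using the three calls above, following their documentation. gsi_pa,
 * gsi_sz, fw_blob, fw_len and the GSI_VER_2_5 enumerator are assumptions
 * for the example; copying the FW image into IRAM is the client's job.
 *
 *	unsigned long iram_ofst, iram_size;
 *
 *	gsi_get_inst_ram_offset_and_size(&iram_ofst, &iram_size, GSI_VER_2_5);
 *	// copy fw_blob (fw_len <= iram_size bytes) to gsi_pa + iram_ofst
 *	ret = gsi_configure_regs(gsi_pa, GSI_VER_2_5);
 *	if (!ret)
 *		ret = gsi_enable_fw(gsi_pa, gsi_sz, GSI_VER_2_5);
 */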
/**
 * gsi_halt_channel_ee - Peripheral should call this function
 * to stop another EE's channel. This is usually used in SSR cleanup.
 *
 * @chan_idx: Virtual channel index
 * @ee: EE
 * @code: [out] response code for operation
 * @Return gsi_status
 */
int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
/**
 * gsi_wdi3_write_evt_ring_db - write event ring doorbell address
 *
 * @chan_hdl: gsi channel handle
 * @db_addr_low: low 32 bits of the doorbell address
 * @db_addr_high: high 32 bits of the doorbell address
 * @Return none
 */
void gsi_wdi3_write_evt_ring_db(unsigned long chan_hdl, uint32_t db_addr_low,
	uint32_t db_addr_high);
/**
 * gsi_get_refetch_reg - get WP/RP value from re_fetch register
 *
 * @chan_hdl: gsi channel handle
 * @is_rp: rp or wp
 */
int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp);
/**
 * gsi_get_drop_stats - get drop stats by GSI
 *
 * @ep_id: ep index
 * @scratch_id: drop stats on which scratch register
 */
int gsi_get_drop_stats(unsigned long ep_id, int scratch_id);
/**
 * gsi_wdi3_dump_register - dump wdi3 related gsi registers
 *
 * @chan_hdl: gsi channel handle
 */
void gsi_wdi3_dump_register(unsigned long chan_hdl);
/**
 * gsi_map_base - Peripheral should call this function to configure
 * access to the GSI registers.
 *
 * @gsi_base_addr: Base address of GSI register space
 * @gsi_size: Mapping size of the GSI register space
 * @ver: The appropriate GSI version enum
 *
 * @Return gsi_status
 */
int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver);
/**
 * gsi_unmap_base - Peripheral should call this function to undo the
 * effects of gsi_map_base
 *
 * @Return gsi_status
 */
int gsi_unmap_base(void);
/**
 * gsi_map_virtual_ch_to_per_ep - Peripheral should call this function
 * to configure each GSI virtual channel with the peripheral endpoint index.
 *
 * @ee: The ee to be used
 * @chan_num: The channel to be used
 * @per_ep_index: value to assign
 *
 * @Return gsi_status
 */
int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index);
/**
 * gsi_alloc_channel_ee - Peripheral should call this function
 * to allocate another EE's channel. This is usually done at bootup to
 * allocate all channels.
 *
 * @chan_idx: Virtual channel index
 * @ee: EE
 * @code: [out] response code for operation
 * @Return gsi_status
 */
int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
/**
 * gsi_enable_flow_control_ee - Peripheral should call this function
 * to enable flow control on another EE's channel. This is usually done in
 * USB connect and SSR scenarios.
 *
 * @chan_idx: Virtual channel index
 * @ee: EE
 * @code: [out] response code for operation
 * @Return gsi_status
 */
int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
	int *code);
/**
 * gsi_query_msi_addr - get gsi channel msi address
 *
 * @chan_hdl: channel handle
 * @addr: [out] channel msi address
 *
 * @Return gsi_status
 */
int gsi_query_msi_addr(unsigned long chan_hdl, phys_addr_t *addr);
/**
 * gsi_dump_ch_info - dump channel information.
 *
 * @chan_hdl: channel handle
 *
 * @Return void
 */
void gsi_dump_ch_info(unsigned long chan_hdl);
/**
 * gsi_get_hw_profiling_stats() - Query GSI HW profiling stats
 * @stats: [out] stats blob from client populated by driver
 *
 * Returns: 0 on success, negative on failure
 *
 */
int gsi_get_hw_profiling_stats(struct gsi_hw_profiling_data *stats);
/**
 * gsi_get_fw_version() - Query GSI FW version
 * @ver: [out] ver blob from client populated by driver
 *
 * Returns: 0 on success, negative on failure
 *
 */
int gsi_get_fw_version(struct gsi_fw_version *ver);
int gsi_flow_control_ee(unsigned int chan_idx, unsigned int ee,
	bool enable, bool prmy_scnd_fc, int *code);
int gsi_query_flow_control_state_ee(unsigned int chan_idx, unsigned int ee,
	bool prmy_scnd_fc, int *code);
/*
 * Here is a typical sequence of calls
 *
 * gsi_register_device
 *
 * gsi_write_device_scratch (if the protocol needs this)
 *
 * gsi_alloc_evt_ring (for as many event rings as needed)
 * gsi_write_evt_ring_scratch
 *
 * gsi_alloc_channel (for as many channels as needed; channels can have
 * no event ring, an exclusive event ring or a shared event ring)
 * gsi_write_channel_scratch
 * gsi_read_channel_scratch
 * gsi_start_channel
 * gsi_queue_xfer/gsi_start_xfer
 * gsi_config_channel_mode/gsi_poll_channel (if the client wants to poll on
 * xfer completions)
 * gsi_stop_db_channel/gsi_stop_channel
 *
 * gsi_dealloc_channel
 *
 * gsi_dealloc_evt_ring
 *
 * gsi_deregister_device
 *
 */
#endif