gsi.h 65 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #ifndef GSI_H
  6. #define GSI_H
  7. #include <linux/device.h>
  8. #include <linux/types.h>
  9. #include <linux/completion.h>
  10. #include <linux/mutex.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/msm_gsi.h>
  13. #include <linux/errno.h>
  14. #include <linux/ipc_logging.h>
  15. /*
  16. * The following for adding code (ie. for EMULATION) not found on x86.
  17. */
  18. #if defined(CONFIG_IPA_EMULATION)
  19. # include "gsi_emulation_stubs.h"
  20. #endif
  21. #define GSI_ASSERT() \
  22. BUG()
  23. #define GSI_CHAN_MAX 31
  24. #define GSI_EVT_RING_MAX 24
  25. #define GSI_NO_EVT_ERINDEX 31
  26. #define gsi_readl(c) (readl_relaxed(c))
  27. #define gsi_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); })
  28. #define GSI_IPC_LOGGING(buf, fmt, args...) \
  29. do { \
  30. if (buf) \
  31. ipc_log_string((buf), fmt, __func__, __LINE__, \
  32. ## args); \
  33. } while (0)
  34. #define GSIDBG(fmt, args...) \
  35. do { \
  36. dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
  37. ## args);\
  38. if (gsi_ctx) { \
  39. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
  40. "%s:%d " fmt, ## args); \
  41. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
  42. "%s:%d " fmt, ## args); \
  43. } \
  44. } while (0)
  45. #define GSIDBG_LOW(fmt, args...) \
  46. do { \
  47. dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
  48. ## args);\
  49. if (gsi_ctx) { \
  50. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
  51. "%s:%d " fmt, ## args); \
  52. } \
  53. } while (0)
  54. #define GSIERR(fmt, args...) \
  55. do { \
  56. dev_err(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
  57. ## args);\
  58. if (gsi_ctx) { \
  59. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
  60. "%s:%d " fmt, ## args); \
  61. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
  62. "%s:%d " fmt, ## args); \
  63. } \
  64. } while (0)
  65. #define GSI_IPC_LOG_PAGES 50
  66. enum gsi_ver {
  67. GSI_VER_ERR = 0,
  68. GSI_VER_1_0 = 1,
  69. GSI_VER_1_2 = 2,
  70. GSI_VER_1_3 = 3,
  71. GSI_VER_2_0 = 4,
  72. GSI_VER_2_2 = 5,
  73. GSI_VER_2_5 = 6,
  74. GSI_VER_2_7 = 7,
  75. GSI_VER_2_9 = 8,
  76. GSI_VER_2_11 = 9,
  77. GSI_VER_MAX,
  78. };
  79. enum gsi_status {
  80. GSI_STATUS_SUCCESS = 0,
  81. GSI_STATUS_ERROR = 1,
  82. GSI_STATUS_RING_INSUFFICIENT_SPACE = 2,
  83. GSI_STATUS_RING_EMPTY = 3,
  84. GSI_STATUS_RES_ALLOC_FAILURE = 4,
  85. GSI_STATUS_BAD_STATE = 5,
  86. GSI_STATUS_INVALID_PARAMS = 6,
  87. GSI_STATUS_UNSUPPORTED_OP = 7,
  88. GSI_STATUS_NODEV = 8,
  89. GSI_STATUS_POLL_EMPTY = 9,
  90. GSI_STATUS_EVT_RING_INCOMPATIBLE = 10,
  91. GSI_STATUS_TIMED_OUT = 11,
  92. GSI_STATUS_AGAIN = 12,
  93. GSI_STATUS_PENDING_IRQ = 13,
  94. };
  95. enum gsi_intr_type {
  96. GSI_INTR_MSI = 0x0,
  97. GSI_INTR_IRQ = 0x1
  98. };
  99. enum gsi_evt_err {
  100. GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0,
  101. GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1,
  102. GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2,
  103. GSI_EVT_EVT_RING_EMPTY_ERR = 0x3,
  104. };
  105. /**
  106. * gsi_evt_err_notify - event ring error callback info
  107. *
  108. * @user_data: cookie supplied in gsi_alloc_evt_ring
  109. * @evt_id: type of error
  110. * @err_desc: more info about the error
  111. *
  112. */
  113. struct gsi_evt_err_notify {
  114. void *user_data;
  115. enum gsi_evt_err evt_id;
  116. uint16_t err_desc;
  117. };
  118. enum gsi_evt_chtype {
  119. GSI_EVT_CHTYPE_MHI_EV = 0x0,
  120. GSI_EVT_CHTYPE_XHCI_EV = 0x1,
  121. GSI_EVT_CHTYPE_GPI_EV = 0x2,
  122. GSI_EVT_CHTYPE_XDCI_EV = 0x3,
  123. GSI_EVT_CHTYPE_WDI2_EV = 0x4,
  124. GSI_EVT_CHTYPE_GCI_EV = 0x5,
  125. GSI_EVT_CHTYPE_WDI3_EV = 0x6,
  126. GSI_EVT_CHTYPE_MHIP_EV = 0x7,
  127. GSI_EVT_CHTYPE_AQC_EV = 0x8,
  128. GSI_EVT_CHTYPE_11AD_EV = 0x9,
  129. };
  130. enum gsi_evt_ring_elem_size {
  131. GSI_EVT_RING_RE_SIZE_4B = 4,
  132. GSI_EVT_RING_RE_SIZE_8B = 8,
  133. GSI_EVT_RING_RE_SIZE_16B = 16,
  134. GSI_EVT_RING_RE_SIZE_32B = 32,
  135. };
  136. /**
  137. * gsi_evt_ring_props - Event ring related properties
  138. *
  139. * @intf: interface type (of the associated channel)
  140. * @intr: interrupt type
  141. * @re_size: size of event ring element
  142. * @ring_len: length of ring in bytes (must be integral multiple of
  143. * re_size)
  144. * @ring_base_addr: physical base address of ring. Address must be aligned to
  145. * ring_len rounded to power of two
  146. * @ring_base_vaddr: virtual base address of ring (set to NULL when not
  147. * applicable)
  148. * @int_modt: cycles base interrupt moderation (32KHz clock)
  149. * @int_modc: interrupt moderation packet counter
  150. * @intvec: write data for MSI write
  151. * @msi_addr: MSI address
  152. * @rp_update_addr: physical address to which event read pointer should be
  153. * written on every event generation. must be set to 0 when
  154. * no update is desdired
  155. * @rp_update_vaddr: virtual address of event ring read pointer (set to NULL
  156. * when not applicable)
  157. * @exclusive: if true, only one GSI channel can be associated with this
  158. * event ring. if false, the event ring can be shared among
  159. * multiple GSI channels but in that case no polling
  160. * (GSI_CHAN_MODE_POLL) is supported on any of those channels
  161. * @err_cb: error notification callback
  162. * @user_data: cookie used for error notifications
  163. * @evchid_valid: is evchid valid?
  164. * @evchid: the event ID that is being specifically requested (this is
  165. * relevant for MHI where doorbell routing requires ERs to be
  166. * physically contiguous)
  167. * @gsi_read_event_ring_rp: function reads the value of the event ring RP.
  168. */
  169. struct gsi_evt_ring_props {
  170. enum gsi_evt_chtype intf;
  171. enum gsi_intr_type intr;
  172. enum gsi_evt_ring_elem_size re_size;
  173. uint16_t ring_len;
  174. uint64_t ring_base_addr;
  175. void *ring_base_vaddr;
  176. uint16_t int_modt;
  177. uint8_t int_modc;
  178. uint32_t intvec;
  179. uint64_t msi_addr;
  180. uint64_t rp_update_addr;
  181. void *rp_update_vaddr;
  182. bool exclusive;
  183. void (*err_cb)(struct gsi_evt_err_notify *notify);
  184. void *user_data;
  185. bool evchid_valid;
  186. uint8_t evchid;
  187. uint64_t (*gsi_read_event_ring_rp)(struct gsi_evt_ring_props *props,
  188. uint8_t id, int ee);
  189. };
  190. enum gsi_chan_mode {
  191. GSI_CHAN_MODE_CALLBACK = 0x0,
  192. GSI_CHAN_MODE_POLL = 0x1,
  193. };
  194. enum gsi_chan_prot {
  195. GSI_CHAN_PROT_MHI = 0x0,
  196. GSI_CHAN_PROT_XHCI = 0x1,
  197. GSI_CHAN_PROT_GPI = 0x2,
  198. GSI_CHAN_PROT_XDCI = 0x3,
  199. GSI_CHAN_PROT_WDI2 = 0x4,
  200. GSI_CHAN_PROT_GCI = 0x5,
  201. GSI_CHAN_PROT_WDI3 = 0x6,
  202. GSI_CHAN_PROT_MHIP = 0x7,
  203. GSI_CHAN_PROT_AQC = 0x8,
  204. GSI_CHAN_PROT_11AD = 0x9,
  205. };
  206. enum gsi_max_prefetch {
  207. GSI_ONE_PREFETCH_SEG = 0x0,
  208. GSI_TWO_PREFETCH_SEG = 0x1
  209. };
  210. enum gsi_per_evt {
  211. GSI_PER_EVT_GLOB_ERROR,
  212. GSI_PER_EVT_GLOB_GP1,
  213. GSI_PER_EVT_GLOB_GP2,
  214. GSI_PER_EVT_GLOB_GP3,
  215. GSI_PER_EVT_GENERAL_BREAK_POINT,
  216. GSI_PER_EVT_GENERAL_BUS_ERROR,
  217. GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW,
  218. GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW,
  219. };
  220. /**
  221. * gsi_per_notify - Peripheral callback info
  222. *
  223. * @user_data: cookie supplied in gsi_register_device
  224. * @evt_id: type of notification
  225. * @err_desc: error related information
  226. *
  227. */
  228. struct gsi_per_notify {
  229. void *user_data;
  230. enum gsi_per_evt evt_id;
  231. union {
  232. uint16_t err_desc;
  233. } data;
  234. };
  235. /**
  236. * gsi_per_props - Peripheral related properties
  237. *
  238. * @gsi: GSI core version
  239. * @ee: EE where this driver and peripheral driver runs
  240. * @intr: control interrupt type
  241. * @intvec: write data for MSI write
  242. * @msi_addr: MSI address
  243. * @irq: IRQ number
  244. * @phys_addr: physical address of GSI block
  245. * @size: register size of GSI block
  246. * @emulator_intcntrlr_addr: the location of emulator's interrupt control block
  247. * @emulator_intcntrlr_size: the sise of emulator_intcntrlr_addr
  248. * @emulator_intcntrlr_client_isr: client's isr. Called by the emulator's isr
  249. * @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits
  250. * @mhi_er_id_limits: MHI event ring start and end ids
  251. * @notify_cb: general notification callback
  252. * @req_clk_cb: callback to request peripheral clock
  253. * granted should be set to true if request is completed
  254. * synchronously, false otherwise (peripheral needs
  255. * to call gsi_complete_clk_grant later when request is
  256. * completed)
  257. * if this callback is not provided, then GSI will assume
  258. * peripheral is clocked at all times
  259. * @rel_clk_cb: callback to release peripheral clock
  260. * @user_data: cookie used for notifications
  261. * @clk_status_cb: callback to update the current msm bus clock vote
  262. * @enable_clk_bug_on: enable IPA clock for dump saving before assert
  263. * @skip_ieob_mask_wa: flag for skipping ieob_mask_wa
  264. * All the callbacks are in interrupt context
  265. *
  266. */
  267. struct gsi_per_props {
  268. enum gsi_ver ver;
  269. unsigned int ee;
  270. enum gsi_intr_type intr;
  271. uint32_t intvec;
  272. uint64_t msi_addr;
  273. unsigned int irq;
  274. phys_addr_t phys_addr;
  275. unsigned long size;
  276. phys_addr_t emulator_intcntrlr_addr;
  277. unsigned long emulator_intcntrlr_size;
  278. irq_handler_t emulator_intcntrlr_client_isr;
  279. bool mhi_er_id_limits_valid;
  280. uint32_t mhi_er_id_limits[2];
  281. void (*notify_cb)(struct gsi_per_notify *notify);
  282. void (*req_clk_cb)(void *user_data, bool *granted);
  283. int (*rel_clk_cb)(void *user_data);
  284. void *user_data;
  285. int (*clk_status_cb)(void);
  286. void (*enable_clk_bug_on)(void);
  287. bool skip_ieob_mask_wa;
  288. };
  289. enum gsi_chan_evt {
  290. GSI_CHAN_EVT_INVALID = 0x0,
  291. GSI_CHAN_EVT_SUCCESS = 0x1,
  292. GSI_CHAN_EVT_EOT = 0x2,
  293. GSI_CHAN_EVT_OVERFLOW = 0x3,
  294. GSI_CHAN_EVT_EOB = 0x4,
  295. GSI_CHAN_EVT_OOB = 0x5,
  296. GSI_CHAN_EVT_DB_MODE = 0x6,
  297. GSI_CHAN_EVT_UNDEFINED = 0x10,
  298. GSI_CHAN_EVT_RE_ERROR = 0x11,
  299. };
  300. /**
  301. * gsi_chan_xfer_veid - Virtual Channel ID
  302. *
  303. * @GSI_VEID_0: transfer completed for VEID 0
  304. * @GSI_VEID_1: transfer completed for VEID 1
  305. * @GSI_VEID_2: transfer completed for VEID 2
  306. * @GSI_VEID_3: transfer completed for VEID 3
  307. * @GSI_VEID_DEFAULT: used when veid is invalid
  308. */
  309. enum gsi_chan_xfer_veid {
  310. GSI_VEID_0 = 0,
  311. GSI_VEID_1 = 1,
  312. GSI_VEID_2 = 2,
  313. GSI_VEID_3 = 3,
  314. GSI_VEID_DEFAULT,
  315. GSI_VEID_MAX
  316. };
  317. /**
  318. * gsi_chan_xfer_notify - Channel callback info
  319. *
  320. * @chan_user_data: cookie supplied in gsi_alloc_channel
  321. * @xfer_user_data: cookie of the gsi_xfer_elem that caused the
  322. * event to be generated
  323. * @evt_id: type of event triggered by the associated TRE
  324. * (corresponding to xfer_user_data)
  325. * @bytes_xfered: number of bytes transferred by the associated TRE
  326. * (corresponding to xfer_user_data)
  327. * @veid: virtual endpoint id. Valid for GCI completions only
  328. *
  329. */
  330. struct gsi_chan_xfer_notify {
  331. void *chan_user_data;
  332. void *xfer_user_data;
  333. enum gsi_chan_evt evt_id;
  334. uint16_t bytes_xfered;
  335. uint8_t veid;
  336. };
  337. enum gsi_chan_err {
  338. GSI_CHAN_INVALID_TRE_ERR = 0x0,
  339. GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1,
  340. GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2,
  341. GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3,
  342. GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
  343. GSI_CHAN_HWO_1_ERR = 0x5
  344. };
  345. /**
  346. * gsi_chan_err_notify - Channel general callback info
  347. *
  348. * @chan_user_data: cookie supplied in gsi_alloc_channel
  349. * @evt_id: type of error
  350. * @err_desc: more info about the error
  351. *
  352. */
  353. struct gsi_chan_err_notify {
  354. void *chan_user_data;
  355. enum gsi_chan_err evt_id;
  356. uint16_t err_desc;
  357. };
  358. enum gsi_chan_ring_elem_size {
  359. GSI_CHAN_RE_SIZE_4B = 4,
  360. GSI_CHAN_RE_SIZE_8B = 8,
  361. GSI_CHAN_RE_SIZE_16B = 16,
  362. GSI_CHAN_RE_SIZE_32B = 32,
  363. GSI_CHAN_RE_SIZE_64B = 64,
  364. };
  365. enum gsi_chan_use_db_eng {
  366. GSI_CHAN_DIRECT_MODE = 0x0,
  367. GSI_CHAN_DB_MODE = 0x1,
  368. };
  369. /**
  370. * gsi_chan_props - Channel related properties
  371. *
  372. * @prot: interface type
  373. * @dir: channel direction
  374. * @ch_id: virtual channel ID
  375. * @evt_ring_hdl: handle of associated event ring. set to ~0 if no
  376. * event ring associated
  377. * @re_size: size of channel ring element
  378. * @ring_len: length of ring in bytes (must be integral multiple of
  379. * re_size)
  380. * @max_re_expected: maximal number of ring elements expected to be queued.
  381. * used for data path statistics gathering. if 0 provided
  382. * ring_len / re_size will be used.
  383. * @ring_base_addr: physical base address of ring. Address must be aligned to
  384. * ring_len rounded to power of two
  385. * @ring_base_vaddr: virtual base address of ring (set to NULL when not
  386. * applicable)
  387. * @use_db_eng: 0 => direct mode (doorbells are written directly to RE
  388. * engine)
  389. * 1 => DB mode (doorbells are written to DB engine)
  390. * @max_prefetch: limit number of pre-fetch segments for channel
  391. * @low_weight: low channel weight (priority of channel for RE engine
  392. * round robin algorithm); must be >= 1
  393. * @empty_lvl_threshold:
  394. * The thershold number of free entries available in the
  395. * receiving fifos of GSI-peripheral. If Smart PF mode
  396. * is used, REE will fetch/send new TRE to peripheral only
  397. * if peripheral's empty_level_count is higher than
  398. * EMPTY_LVL_THRSHOLD defined for this channel
  399. * @xfer_cb: transfer notification callback, this callback happens
  400. * on event boundaries
  401. *
  402. * e.g. 1
  403. *
  404. * out TD with 3 REs
  405. *
  406. * RE1: EOT=0, EOB=0, CHAIN=1;
  407. * RE2: EOT=0, EOB=0, CHAIN=1;
  408. * RE3: EOT=1, EOB=0, CHAIN=0;
  409. *
  410. * the callback will be triggered for RE3 using the
  411. * xfer_user_data of that RE
  412. *
  413. * e.g. 2
  414. *
  415. * in REs
  416. *
  417. * RE1: EOT=1, EOB=0, CHAIN=0;
  418. * RE2: EOT=1, EOB=0, CHAIN=0;
  419. * RE3: EOT=1, EOB=0, CHAIN=0;
  420. *
  421. * received packet consumes all of RE1, RE2 and part of RE3
  422. * for EOT condition. there will be three callbacks in below
  423. * order
  424. *
  425. * callback for RE1 using GSI_CHAN_EVT_OVERFLOW
  426. * callback for RE2 using GSI_CHAN_EVT_OVERFLOW
  427. * callback for RE3 using GSI_CHAN_EVT_EOT
  428. *
  429. * @err_cb: error notification callback
  430. * @cleanup_cb; cleanup rx-pkt/skb callback
  431. * @chan_user_data: cookie used for notifications
  432. *
  433. * All the callbacks are in interrupt context
  434. *
  435. */
  436. struct gsi_chan_props {
  437. enum gsi_chan_prot prot;
  438. enum gsi_chan_dir dir;
  439. uint8_t ch_id;
  440. unsigned long evt_ring_hdl;
  441. enum gsi_chan_ring_elem_size re_size;
  442. uint16_t ring_len;
  443. uint16_t max_re_expected;
  444. uint64_t ring_base_addr;
  445. uint8_t db_in_bytes;
  446. void *ring_base_vaddr;
  447. enum gsi_chan_use_db_eng use_db_eng;
  448. enum gsi_max_prefetch max_prefetch;
  449. uint8_t low_weight;
  450. enum gsi_prefetch_mode prefetch_mode;
  451. uint8_t empty_lvl_threshold;
  452. void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
  453. void (*err_cb)(struct gsi_chan_err_notify *notify);
  454. void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data);
  455. void *chan_user_data;
  456. };
  457. enum gsi_xfer_flag {
  458. GSI_XFER_FLAG_CHAIN = 0x1,
  459. GSI_XFER_FLAG_EOB = 0x100,
  460. GSI_XFER_FLAG_EOT = 0x200,
  461. GSI_XFER_FLAG_BEI = 0x400
  462. };
  463. enum gsi_xfer_elem_type {
  464. GSI_XFER_ELEM_DATA,
  465. GSI_XFER_ELEM_IMME_CMD,
  466. GSI_XFER_ELEM_NOP,
  467. };
  468. /**
  469. * gsi_gpi_channel_scratch - GPI protocol SW config area of
  470. * channel scratch
  471. *
  472. * @dl_nlo_channel: Whether this is DL NLO Channel or not? Relevant for
  473. * GSI 2.5 and above where DL NLO introduced.
  474. * @max_outstanding_tre: Used for the prefetch management sequence by the
  475. * sequencer. Defines the maximum number of allowed
  476. * outstanding TREs in IPA/GSI (in Bytes). RE engine
  477. * prefetch will be limited by this configuration. It
  478. * is suggested to configure this value to IPA_IF
  479. * channel TLV queue size times element size. To disable
  480. * the feature in doorbell mode (DB Mode=1). Maximum
  481. * outstanding TREs should be set to 64KB
  482. * (or any value larger or equal to ring length . RLEN)
  483. * The field is irrelevant starting GSI 2.5 where smart
  484. * prefetch implemented by the H/W.
  485. * @outstanding_threshold: Used for the prefetch management sequence by the
  486. * sequencer. Defines the threshold (in Bytes) as to when
  487. * to update the channel doorbell. Should be smaller than
  488. * Maximum outstanding TREs. value. It is suggested to
  489. * configure this value to 2 * element size.
  490. * The field is irrelevant starting GSI 2.5 where smart
  491. * prefetch implemented by the H/W.
  492. */
  493. struct __packed gsi_gpi_channel_scratch {
  494. uint64_t dl_nlo_channel:1; /* Relevant starting GSI 2.5 */
  495. uint64_t resvd1:63;
  496. uint32_t resvd2:16;
  497. uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */
  498. uint32_t resvd3:16;
  499. uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */
  500. };
  501. /**
  502. * gsi_mhi_channel_scratch - MHI protocol SW config area of
  503. * channel scratch
  504. *
  505. * @mhi_host_wp_addr: Valid only when UL/DL Sync En is asserted. Defines
  506. * address in host from which channel write pointer
  507. * should be read in polling mode
  508. * @assert_bit40: 1: bit #41 in address should be asserted upon
  509. * IPA_IF.ProcessDescriptor routine (for MHI over PCIe
  510. * transfers)
  511. * 0: bit #41 in address should be deasserted upon
  512. * IPA_IF.ProcessDescriptor routine (for non-MHI over
  513. * PCIe transfers)
  514. * @polling_configuration: Uplink channels: Defines timer to poll on MHI
  515. * context. Range: 1 to 31 milliseconds.
  516. * Downlink channel: Defines transfer ring buffer
  517. * availability threshold to poll on MHI context in
  518. * multiple of 8. Range: 0 to 31, meaning 0 to 258 ring
  519. * elements. E.g., value of 2 indicates 16 ring elements.
  520. * Valid only when Burst Mode Enabled is set to 1
  521. * @burst_mode_enabled: 0: Burst mode is disabled for this channel
  522. * 1: Burst mode is enabled for this channel
  523. * @polling_mode: 0: the channel is not in polling mode, meaning the
  524. * host should ring DBs.
  525. * 1: the channel is in polling mode, meaning the host
  526. * @oob_mod_threshold: Defines OOB moderation threshold. Units are in 8
  527. * ring elements.
  528. * should not ring DBs until notified of DB mode/OOB mode
  529. * @max_outstanding_tre: Used for the prefetch management sequence by the
  530. * sequencer. Defines the maximum number of allowed
  531. * outstanding TREs in IPA/GSI (in Bytes). RE engine
  532. * prefetch will be limited by this configuration. It
  533. * is suggested to configure this value to IPA_IF
  534. * channel TLV queue size times element size.
  535. * To disable the feature in doorbell mode (DB Mode=1).
  536. * Maximum outstanding TREs should be set to 64KB
  537. * (or any value larger or equal to ring length . RLEN)
  538. * The field is irrelevant starting GSI 2.5 where smart
  539. * prefetch implemented by the H/W.
  540. * @outstanding_threshold: Used for the prefetch management sequence by the
  541. * sequencer. Defines the threshold (in Bytes) as to when
  542. * to update the channel doorbell. Should be smaller than
  543. * Maximum outstanding TREs. value. It is suggested to
  544. * configure this value to min(TLV_FIFO_SIZE/2,8) *
  545. * element size.
  546. * The field is irrelevant starting GSI 2.5 where smart
  547. * prefetch implemented by the H/W.
  548. */
  549. struct __packed gsi_mhi_channel_scratch {
  550. uint64_t mhi_host_wp_addr;
  551. uint32_t rsvd1:1;
  552. uint32_t assert_bit40:1;
  553. uint32_t polling_configuration:5;
  554. uint32_t burst_mode_enabled:1;
  555. uint32_t polling_mode:1;
  556. uint32_t oob_mod_threshold:5;
  557. uint32_t resvd2:2;
  558. uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */
  559. uint32_t resvd3:16;
  560. uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */
  561. };
  562. /**
  563. * gsi_xdci_channel_scratch - xDCI protocol SW config area of
  564. * channel scratch
  565. *
  566. * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregationi
  567. * configuration). Must be aligned to Max USB Packet Size
  568. * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned
  569. * transfer resource index for the transfer, which was
  570. * returned in response to the Start Transfer command.
  571. * This field is used for "Update Transfer" command
  572. * @last_trb_addr: Address (LSB - based on alignment restrictions) of
  573. * last TRB in queue. Used to identify rollover case
  574. * @depcmd_low_addr: Used to generate "Update Transfer" command
  575. * @max_outstanding_tre: Used for the prefetch management sequence by the
  576. * sequencer. Defines the maximum number of allowed
  577. * outstanding TREs in IPA/GSI (in Bytes). RE engine
  578. * prefetch will be limited by this configuration. It
  579. * is suggested to configure this value to IPA_IF
  580. * channel TLV queue size times element size.
  581. * To disable the feature in doorbell mode (DB Mode=1)
  582. * Maximum outstanding TREs should be set to 64KB
  583. * (or any value larger or equal to ring length . RLEN)
  584. * The field is irrelevant starting GSI 2.5 where smart
  585. * prefetch implemented by the H/W.
  586. * @depcmd_hi_addr: Used to generate "Update Transfer" command
  587. * @outstanding_threshold: Used for the prefetch management sequence by the
  588. * sequencer. Defines the threshold (in Bytes) as to when
  589. * to update the channel doorbell. Should be smaller than
  590. * Maximum outstanding TREs. value. It is suggested to
  591. * configure this value to 2 * element size. for MBIM the
  592. * suggested configuration is the element size.
  593. * The field is irrelevant starting GSI 2.5 where smart
  594. * prefetch implemented by the H/W.
  595. */
  596. struct __packed gsi_xdci_channel_scratch {
  597. uint32_t last_trb_addr:16;
  598. uint32_t resvd1:4;
  599. uint32_t xferrscidx:7;
  600. uint32_t const_buffer_size:5;
  601. uint32_t depcmd_low_addr;
  602. uint32_t depcmd_hi_addr:8;
  603. uint32_t resvd2:8;
  604. uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */
  605. uint32_t resvd3:16;
  606. uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */
  607. };
  608. /**
  609. * gsi_wdi_channel_scratch - WDI protocol SW config area of
  610. * channel scratch
  611. *
  612. * @wifi_rx_ri_addr_low: Low 32 bits of Transfer ring Read Index address.
  613. * @wifi_rx_ri_addr_high: High 32 bits of Transfer ring Read Index address.
  614. * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index
  615. * N is the number of packets that IPA will
  616. * process before Wifi transfer ring Ri will
  617. * be updated.
  618. * @update_ri_moderation_counter: This field is incremented with each TRE
  619. * processed in MCS.
  620. * @wdi_rx_tre_proc_in_progress: It is set if IPA IF returned BECAME FULL
  621. * status after MCS submitted an inline immediate
  622. * command to update the metadata. It allows MCS
  623. * to know that it has to retry sending the TRE
  624. * to IPA.
  625. * @wdi_rx_vdev_id: Rx only. Initialized to 0xFF by SW after allocating channel
  626. * and before starting it. Both FW_DESC and VDEV_ID are part
  627. * of a scratch word that is Read/Write for both MCS and SW.
  628. * To avoid race conditions, SW should not update this field
  629. * after starting the channel.
  630. * @wdi_rx_fw_desc: Rx only. Initialized to 0xFF by SW after allocating channel
  631. * and before starting it. After Start, this is a Read only
  632. * field for SW.
  633. * @endp_metadatareg_offset: Rx only, the offset of IPA_ENDP_INIT_HDR_METADATA
  634. * of the corresponding endpoint in 4B words from IPA
  635. * base address. Read only field for MCS.
  636. * Write for SW.
  637. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field
  638. * for MCS. Write for SW.
  639. * @wdi_rx_pkt_length: If WDI_RX_TRE_PROC_IN_PROGRESS is set, this field is
  640. * valid and contains the packet length of the TRE that
  641. * needs to be submitted to IPA.
  642. * @resv1: reserved bits.
  643. * @pkt_comp_count: It is incremented on each AOS received. When event ring
  644. * Write index is updated, it is decremented by the same
  645. * amount.
  646. * @stop_in_progress_stm: If a Stop request is in progress, this will indicate
  647. * the current stage of processing of the stop within MCS
  648. * @resv2: reserved bits.
  649. * wdi_rx_qmap_id_internal: Initialized to 0 by MCS when the channel is
  650. * allocated. It is updated to the current value of SW
  651. * QMAP ID that is being written by MCS to the IPA
  652. * metadata register.
  653. */
  654. struct __packed gsi_wdi_channel_scratch {
  655. uint32_t wifi_rx_ri_addr_low;
  656. uint32_t wifi_rx_ri_addr_high;
  657. uint32_t update_ri_moderation_threshold:5;
  658. uint32_t update_ri_moderation_counter:6;
  659. uint32_t wdi_rx_tre_proc_in_progress:1;
  660. uint32_t resv1:4;
  661. uint32_t wdi_rx_vdev_id:8;
  662. uint32_t wdi_rx_fw_desc:8;
  663. uint32_t endp_metadatareg_offset:16;
  664. uint32_t qmap_id:16;
  665. uint32_t wdi_rx_pkt_length:16;
  666. uint32_t resv2:2;
  667. uint32_t pkt_comp_count:11;
  668. uint32_t stop_in_progress_stm:3;
  669. uint32_t resv3:16;
  670. uint32_t wdi_rx_qmap_id_internal:16;
  671. };
  672. /**
  673. * gsi_wdi2_channel_scratch_lito - WDI protocol SW config area of
  674. * channel scratch
  675. *
  676. * @wifi_rx_ri_addr_low: Low 32 bits of Transfer ring Read Index address.
  677. * @wifi_rx_ri_addr_high: High 32 bits of Transfer ring Read Index address.
  678. * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index
  679. * N is the number of packets that IPA will
  680. * process before Wifi transfer ring Ri will
  681. * be updated.
  682. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field
  683. * for MCS. Write for SW.
  684. * @endp_metadatareg_offset: Rx only, the offset of IPA_ENDP_INIT_HDR_METADATA
  685. * of the corresponding endpoint in 4B words from IPA
  686. * base address. Read only field for MCS.
  687. * Write for SW.
  688. * @wdi_rx_vdev_id: Rx only. Initialized to 0xFF by SW after allocating channel
  689. * and before starting it. Both FW_DESC and VDEV_ID are part
  690. * of a scratch word that is Read/Write for both MCS and SW.
  691. * To avoid race conditions, SW should not update this field
  692. * after starting the channel.
  693. * @wdi_rx_fw_desc: Rx only. Initialized to 0xFF by SW after allocating channel
  694. * and before starting it. After Start, this is a Read only
  695. * field for SW.
  696. * @update_ri_moderation_counter: This field is incremented with each TRE
  697. * processed in MCS.
  698. * @wdi_rx_tre_proc_in_progress: It is set if IPA IF returned BECAME FULL
  699. * status after MCS submitted an inline immediate
  700. * command to update the metadata. It allows MCS
  701. * to know that it has to retry sending the TRE
  702. * to IPA.
  703. * @outstanding_tlvs_counter: It is the count of outstanding TLVs submitted to
  704. * IPA by MCS and waiting for AOS completion from IPA.
  705. * @wdi_rx_pkt_length: If WDI_RX_TRE_PROC_IN_PROGRESS is set, this field is
  706. * valid and contains the packet length of the TRE that
  707. * needs to be submitted to IPA.
  708. * @resv1: reserved bits.
  709. * @pkt_comp_count: It is incremented on each AOS received. When event ring
  710. * Write index is updated, it is decremented by the same
  711. * amount.
  712. * @stop_in_progress_stm: If a Stop request is in progress, this will indicate
  713. * the current stage of processing of the stop within MCS
  714. * @resv2: reserved bits.
  715. * wdi_rx_qmap_id_internal: Initialized to 0 by MCS when the channel is
  716. * allocated. It is updated to the current value of SW
  717. * QMAP ID that is being written by MCS to the IPA
  718. * metadata register.
  719. */
  720. struct __packed gsi_wdi2_channel_scratch_new {
  721. uint32_t wifi_rx_ri_addr_low;
  722. uint32_t wifi_rx_ri_addr_high;
  723. uint32_t update_ri_moderation_threshold:5;
  724. uint32_t qmap_id:8;
  725. uint32_t resv1:3;
  726. uint32_t endp_metadatareg_offset:16;
  727. uint32_t wdi_rx_vdev_id:8;
  728. uint32_t wdi_rx_fw_desc:8;
  729. uint32_t update_ri_moderation_counter:6;
  730. uint32_t wdi_rx_tre_proc_in_progress:1;
  731. uint32_t resv4:1;
  732. uint32_t outstanding_tlvs_counter:8;
  733. uint32_t wdi_rx_pkt_length:16;
  734. uint32_t resv2:2;
  735. uint32_t pkt_comp_count:11;
  736. uint32_t stop_in_progress_stm:3;
  737. uint32_t resv3:16;
  738. uint32_t wdi_rx_qmap_id_internal:16;
  739. };
  740. /**
  741. * gsi_mhip_channel_scratch - MHI PRIME protocol SW config area of
  742. * channel scratch
  743. * @assert_bit_40: Valid only for non-host channels.
  744. * Set to 1 for MHI’ channels when running over PCIe.
  745. * @host_channel: Set to 1 for MHIP channel running on host.
  746. *
  747. */
  748. struct __packed gsi_mhip_channel_scratch {
  749. uint32_t assert_bit_40:1;
  750. uint32_t host_channel:1;
  751. uint32_t resvd1:30;
  752. };
  753. /**
  754. * gsi_11ad_rx_channel_scratch - 11AD protocol SW config area of
  755. * RX channel scratch
  756. *
  757. * @status_ring_hwtail_address_lsb: Low 32 bits of status ring hwtail address.
  758. * @status_ring_hwtail_address_msb: High 32 bits of status ring hwtail address.
  759. * @data_buffers_base_address_lsb: Low 32 bits of the data buffers address.
  760. * @data_buffers_base_address_msb: High 32 bits of the data buffers address.
  761. * @fixed_data_buffer_size: the fixed buffer size (> MTU).
  762. * @resv1: reserved bits.
  763. */
  764. struct __packed gsi_11ad_rx_channel_scratch {
  765. uint32_t status_ring_hwtail_address_lsb;
  766. uint32_t status_ring_hwtail_address_msb;
  767. uint32_t data_buffers_base_address_lsb;
  768. uint32_t data_buffers_base_address_msb:8;
  769. uint32_t fixed_data_buffer_size_pow_2:16;
  770. uint32_t resv1:8;
  771. };
  772. /**
  773. * gsi_11ad_tx_channel_scratch - 11AD protocol SW config area of
  774. * TX channel scratch
  775. *
  776. * @status_ring_hwtail_address_lsb: Low 32 bits of status ring hwtail address.
  777. * @status_ring_hwhead_address_lsb: Low 32 bits of status ring hwhead address.
  778. * @status_ring_hwhead_hwtail_8_msb: higher 8 msbs of status ring
  779. * hwhead\hwtail addresses (should be identical).
  780. * @update_status_hwtail_mod_threshold: The threshold in (32B) elements for
  781. * updating descriptor ring 11ad HWTAIL pointer moderation.
  782. * @status_ring_num_elem - the number of elements in the status ring.
  783. * @resv1: reserved bits.
  784. * @fixed_data_buffer_size_pow_2: the fixed buffer size power of 2 (> MTU).
  785. * @resv2: reserved bits.
  786. */
  787. struct __packed gsi_11ad_tx_channel_scratch {
  788. uint32_t status_ring_hwtail_address_lsb;
  789. uint32_t status_ring_hwhead_address_lsb;
  790. uint32_t status_ring_hwhead_hwtail_8_msb:8;
  791. uint32_t update_status_hwtail_mod_threshold:8;
  792. uint32_t status_ring_num_elem:16;
  793. uint32_t resv1:8;
  794. uint32_t fixed_data_buffer_size_pow_2:16;
  795. uint32_t resv2:8;
  796. };
  797. /**
  798. * gsi_wdi3_channel_scratch - WDI protocol 3 SW config area of
  799. * channel scratch
  800. *
  801. * @wifi_rx_ri_addr_low: Low 32 bits of Transfer ring Read Index address.
  802. * @wifi_rx_ri_addr_high: High 32 bits of Transfer ring Read Index address.
  803. * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index
  804. * N is the number of packets that IPA will
  805. * process before Wifi transfer ring Ri will
  806. * be updated.
  807. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field
  808. * for MCS. Write for SW.
  809. * @resv: reserved bits.
  810. * @endp_metadata_reg_offset: Rx only, the offset of
  811. * IPA_ENDP_INIT_HDR_METADATA_n of the
  812. * corresponding endpoint in 4B words from IPA
  813. * base address.
  814. * @rx_pkt_offset: Rx only, Since Rx header length is not fixed,
  815. * WLAN host will pass this information to IPA.
  816. * @resv: reserved bits.
  817. */
  818. struct __packed gsi_wdi3_channel_scratch {
  819. uint32_t wifi_rp_address_low;
  820. uint32_t wifi_rp_address_high;
  821. uint32_t update_rp_moderation_threshold : 5;
  822. uint32_t qmap_id : 8;
  823. uint32_t reserved1 : 3;
  824. uint32_t endp_metadata_reg_offset : 16;
  825. uint32_t rx_pkt_offset : 16;
  826. uint32_t reserved2 : 16;
  827. };
  828. /**
  829. * gsi_wdi3_channel_scratch2 - WDI3 protocol SW config area of
  830. * channel scratch2
  831. *
  832. * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index
  833. * N is the number of packets that IPA will
  834. * process before Wifi transfer ring Ri will
  835. * be updated.
  836. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only
  837. * field for MCS. Write for SW.
  838. * @resv: reserved bits.
  839. * @endp_metadata_reg_offset: Rx only, the offset of
  840. * IPA_ENDP_INIT_HDR_METADATA_n of the
  841. * corresponding endpoint in 4B words from IPA
  842. * base address.
  843. */
  844. struct __packed gsi_wdi3_channel_scratch2 {
  845. uint32_t update_rp_moderation_threshold : 5;
  846. uint32_t qmap_id : 8;
  847. uint32_t reserved1 : 3;
  848. uint32_t endp_metadata_reg_offset : 16;
  849. };
  850. /**
  851. * gsi_wdi3_channel_scratch2_reg - channel scratch2 SW config area
  852. *
  853. */
  854. union __packed gsi_wdi3_channel_scratch2_reg {
  855. struct __packed gsi_wdi3_channel_scratch2 wdi;
  856. struct __packed {
  857. uint32_t word1;
  858. } data;
  859. };
  860. /**
  861. * gsi_channel_scratch - channel scratch SW config area
  862. *
  863. */
  864. union __packed gsi_channel_scratch {
  865. struct __packed gsi_gpi_channel_scratch gpi;
  866. struct __packed gsi_mhi_channel_scratch mhi;
  867. struct __packed gsi_xdci_channel_scratch xdci;
  868. struct __packed gsi_wdi_channel_scratch wdi;
  869. struct __packed gsi_11ad_rx_channel_scratch rx_11ad;
  870. struct __packed gsi_11ad_tx_channel_scratch tx_11ad;
  871. struct __packed gsi_wdi3_channel_scratch wdi3;
  872. struct __packed gsi_mhip_channel_scratch mhip;
  873. struct __packed gsi_wdi2_channel_scratch_new wdi2_new;
  874. struct __packed {
  875. uint32_t word1;
  876. uint32_t word2;
  877. uint32_t word3;
  878. uint32_t word4;
  879. } data;
  880. };
  881. /**
  882. * gsi_wdi_channel_scratch3 - WDI protocol SW config area of
  883. * channel scratch3
  884. */
  885. struct __packed gsi_wdi_channel_scratch3 {
  886. uint32_t endp_metadatareg_offset:16;
  887. uint32_t qmap_id:16;
  888. };
  889. /**
  890. * gsi_wdi_channel_scratch3_reg - channel scratch3 SW config area
  891. *
  892. */
  893. union __packed gsi_wdi_channel_scratch3_reg {
  894. struct __packed gsi_wdi_channel_scratch3 wdi;
  895. struct __packed {
  896. uint32_t word1;
  897. } data;
  898. };
  899. /**
  900. * gsi_wdi2_channel_scratch2 - WDI protocol SW config area of
  901. * channel scratch2
  902. */
  903. struct __packed gsi_wdi2_channel_scratch2 {
  904. uint32_t update_ri_moderation_threshold:5;
  905. uint32_t qmap_id:8;
  906. uint32_t resv1:3;
  907. uint32_t endp_metadatareg_offset:16;
  908. };
  909. /**
  910. * gsi_wdi_channel_scratch2_reg - channel scratch2 SW config area
  911. *
  912. */
  913. union __packed gsi_wdi2_channel_scratch2_reg {
  914. struct __packed gsi_wdi2_channel_scratch2 wdi;
  915. struct __packed {
  916. uint32_t word1;
  917. } data;
  918. };
  919. /**
  920. * gsi_mhi_evt_scratch - MHI protocol SW config area of
  921. * event scratch
  922. */
  923. struct __packed gsi_mhi_evt_scratch {
  924. uint32_t resvd1;
  925. uint32_t resvd2;
  926. };
  927. /**
  928. * gsi_mhip_evt_scratch - MHI PRIME protocol SW config area of
  929. * event scratch
  930. */
  931. struct __packed gsi_mhip_evt_scratch {
  932. uint32_t rp_mod_threshold:8;
  933. uint32_t rp_mod_timer:4;
  934. uint32_t rp_mod_counter:8;
  935. uint32_t rp_mod_timer_id:4;
  936. uint32_t rp_mod_timer_running:1;
  937. uint32_t resvd1:7;
  938. uint32_t fixed_buffer_sz:16;
  939. uint32_t resvd2:16;
  940. };
  941. /**
  942. * gsi_xdci_evt_scratch - xDCI protocol SW config area of
  943. * event scratch
  944. *
  945. */
  946. struct __packed gsi_xdci_evt_scratch {
  947. uint32_t gevntcount_low_addr;
  948. uint32_t gevntcount_hi_addr:8;
  949. uint32_t resvd1:24;
  950. };
  951. /**
  952. * gsi_wdi_evt_scratch - WDI protocol SW config area of
  953. * event scratch
  954. *
  955. */
  956. struct __packed gsi_wdi_evt_scratch {
  957. uint32_t update_ri_moderation_config:8;
  958. uint32_t resvd1:8;
  959. uint32_t update_ri_mod_timer_running:1;
  960. uint32_t evt_comp_count:14;
  961. uint32_t resvd2:1;
  962. uint32_t last_update_ri:16;
  963. uint32_t resvd3:16;
  964. };
  965. /**
  966. * gsi_11ad_evt_scratch - 11AD protocol SW config area of
  967. * event scratch
  968. *
  969. */
  970. struct __packed gsi_11ad_evt_scratch {
  971. uint32_t update_status_hwtail_mod_threshold : 8;
  972. uint32_t resvd1:8;
  973. uint32_t resvd2:16;
  974. uint32_t resvd3;
  975. };
  976. /**
  977. * gsi_wdi3_evt_scratch - wdi3 protocol SW config area of
  978. * event scratch
  979. * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index
  980. * N is the number of packets that IPA will
  981. * process before Wifi transfer ring Ri will
  982. * be updated.
  983. * @reserved1: reserve bit.
  984. * @reserved2: reserve bit.
  985. */
  986. struct __packed gsi_wdi3_evt_scratch {
  987. uint32_t update_rp_moderation_config : 8;
  988. uint32_t reserved1 : 24;
  989. uint32_t reserved2;
  990. };
  991. /**
  992. * gsi_evt_scratch - event scratch SW config area
  993. *
  994. */
  995. union __packed gsi_evt_scratch {
  996. struct __packed gsi_mhi_evt_scratch mhi;
  997. struct __packed gsi_xdci_evt_scratch xdci;
  998. struct __packed gsi_wdi_evt_scratch wdi;
  999. struct __packed gsi_11ad_evt_scratch w11ad;
  1000. struct __packed gsi_wdi3_evt_scratch wdi3;
  1001. struct __packed gsi_mhip_evt_scratch mhip;
  1002. struct __packed {
  1003. uint32_t word1;
  1004. uint32_t word2;
  1005. } data;
  1006. };
  1007. /**
  1008. * gsi_device_scratch - EE scratch config parameters
  1009. *
  1010. * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid?
  1011. * @mhi_base_chan_idx: base index of IPA MHI channel indexes.
  1012. * IPA MHI channel index = GSI channel ID +
  1013. * MHI base channel index
  1014. * @max_usb_pkt_size_valid: is max_usb_pkt_size valid?
  1015. * @max_usb_pkt_size: max USB packet size in bytes (valid values are
  1016. * 64, 512 and 1024)
  1017. */
  1018. struct gsi_device_scratch {
  1019. bool mhi_base_chan_idx_valid;
  1020. uint8_t mhi_base_chan_idx;
  1021. bool max_usb_pkt_size_valid;
  1022. uint16_t max_usb_pkt_size;
  1023. };
  1024. /**
  1025. * gsi_chan_info - information about channel occupancy
  1026. *
  1027. * @wp: channel write pointer (physical address)
  1028. * @rp: channel read pointer (physical address)
  1029. * @evt_valid: is evt* info valid?
  1030. * @evt_wp: event ring write pointer (physical address)
  1031. * @evt_rp: event ring read pointer (physical address)
  1032. */
  1033. struct gsi_chan_info {
  1034. uint64_t wp;
  1035. uint64_t rp;
  1036. bool evt_valid;
  1037. uint64_t evt_wp;
  1038. uint64_t evt_rp;
  1039. };
  1040. enum gsi_evt_ring_state {
  1041. GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
  1042. GSI_EVT_RING_STATE_ALLOCATED = 0x1,
  1043. GSI_EVT_RING_STATE_ERROR = 0xf
  1044. };
  1045. enum gsi_chan_state {
  1046. GSI_CHAN_STATE_NOT_ALLOCATED = 0x0,
  1047. GSI_CHAN_STATE_ALLOCATED = 0x1,
  1048. GSI_CHAN_STATE_STARTED = 0x2,
  1049. GSI_CHAN_STATE_STOPPED = 0x3,
  1050. GSI_CHAN_STATE_STOP_IN_PROC = 0x4,
  1051. GSI_CHAN_STATE_FLOW_CONTROL = 0x5,
  1052. GSI_CHAN_STATE_ERROR = 0xf
  1053. };
  1054. struct gsi_ring_ctx {
  1055. spinlock_t slock;
  1056. unsigned long base_va;
  1057. uint64_t base;
  1058. uint64_t wp;
  1059. uint64_t rp;
  1060. uint64_t wp_local;
  1061. uint64_t rp_local;
  1062. uint16_t len;
  1063. uint8_t elem_sz;
  1064. uint16_t max_num_elem;
  1065. uint64_t end;
  1066. };
  1067. struct gsi_chan_dp_stats {
  1068. unsigned long ch_below_lo;
  1069. unsigned long ch_below_hi;
  1070. unsigned long ch_above_hi;
  1071. unsigned long empty_time;
  1072. unsigned long last_timestamp;
  1073. };
  1074. struct gsi_chan_stats {
  1075. unsigned long queued;
  1076. unsigned long completed;
  1077. unsigned long callback_to_poll;
  1078. unsigned long poll_to_callback;
  1079. unsigned long poll_pending_irq;
  1080. unsigned long invalid_tre_error;
  1081. unsigned long poll_ok;
  1082. unsigned long poll_empty;
  1083. unsigned long userdata_in_use;
  1084. struct gsi_chan_dp_stats dp;
  1085. };
  1086. /**
  1087. * struct gsi_user_data - user_data element pointed by the TRE
  1088. * @valid: valid to be cleaned. if its true that means it is being used.
  1089. * false means its free to overwrite
  1090. * @p: pointer to the user data array element
  1091. */
  1092. struct gsi_user_data {
  1093. bool valid;
  1094. void *p;
  1095. };
  1096. struct gsi_chan_ctx {
  1097. struct gsi_chan_props props;
  1098. enum gsi_chan_state state;
  1099. struct gsi_ring_ctx ring;
  1100. struct gsi_user_data *user_data;
  1101. struct gsi_evt_ctx *evtr;
  1102. struct mutex mlock;
  1103. struct completion compl;
  1104. bool allocated;
  1105. atomic_t poll_mode;
  1106. union __packed gsi_channel_scratch scratch;
  1107. struct gsi_chan_stats stats;
  1108. bool enable_dp_stats;
  1109. bool print_dp_stats;
  1110. };
  1111. struct gsi_evt_stats {
  1112. unsigned long completed;
  1113. };
  1114. struct gsi_evt_ctx {
  1115. struct gsi_evt_ring_props props;
  1116. enum gsi_evt_ring_state state;
  1117. uint8_t id;
  1118. struct gsi_ring_ctx ring;
  1119. struct mutex mlock;
  1120. struct completion compl;
  1121. struct gsi_chan_ctx *chan;
  1122. atomic_t chan_ref_cnt;
  1123. union __packed gsi_evt_scratch scratch;
  1124. struct gsi_evt_stats stats;
  1125. };
  1126. struct gsi_ee_scratch {
  1127. union __packed {
  1128. struct {
  1129. uint32_t inter_ee_cmd_return_code:3;
  1130. uint32_t resvd1:2;
  1131. uint32_t generic_ee_cmd_return_code:3;
  1132. uint32_t resvd2:7;
  1133. uint32_t max_usb_pkt_size:1;
  1134. uint32_t resvd3:8;
  1135. uint32_t mhi_base_chan_idx:8;
  1136. } s;
  1137. uint32_t val;
  1138. } word0;
  1139. uint32_t word1;
  1140. };
  1141. struct ch_debug_stats {
  1142. unsigned long ch_allocate;
  1143. unsigned long ch_start;
  1144. unsigned long ch_stop;
  1145. unsigned long ch_reset;
  1146. unsigned long ch_de_alloc;
  1147. unsigned long ch_db_stop;
  1148. unsigned long cmd_completed;
  1149. };
  1150. struct gsi_generic_ee_cmd_debug_stats {
  1151. unsigned long halt_channel;
  1152. unsigned long flow_ctrl_channel;
  1153. };
  1154. struct gsi_coal_chan_info {
  1155. uint8_t ch_id;
  1156. uint8_t evchid;
  1157. };
  1158. struct gsi_ctx {
  1159. void __iomem *base;
  1160. struct device *dev;
  1161. struct gsi_per_props per;
  1162. bool per_registered;
  1163. struct gsi_chan_ctx chan[GSI_CHAN_MAX];
  1164. struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
  1165. struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
  1166. struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg;
  1167. struct mutex mlock;
  1168. spinlock_t slock;
  1169. unsigned long evt_bmap;
  1170. bool enabled;
  1171. atomic_t num_chan;
  1172. atomic_t num_evt_ring;
  1173. struct gsi_ee_scratch scratch;
  1174. int num_ch_dp_stats;
  1175. struct workqueue_struct *dp_stat_wq;
  1176. u32 max_ch;
  1177. u32 max_ev;
  1178. struct completion gen_ee_cmd_compl;
  1179. void *ipc_logbuf;
  1180. void *ipc_logbuf_low;
  1181. struct gsi_coal_chan_info coal_info;
  1182. /*
  1183. * The following used only on emulation systems.
  1184. */
  1185. void __iomem *intcntrlr_base;
  1186. u32 intcntrlr_mem_size;
  1187. irq_handler_t intcntrlr_gsi_isr;
  1188. irq_handler_t intcntrlr_client_isr;
  1189. atomic_t num_unclock_irq;
  1190. };
  1191. enum gsi_re_type {
  1192. GSI_RE_XFER = 0x2,
  1193. GSI_RE_IMMD_CMD = 0x3,
  1194. GSI_RE_NOP = 0x4,
  1195. GSI_RE_COAL = 0x8,
  1196. };
  1197. struct __packed gsi_tre {
  1198. uint64_t buffer_ptr;
  1199. uint16_t buf_len;
  1200. uint16_t resvd1;
  1201. uint16_t chain:1;
  1202. uint16_t resvd4:7;
  1203. uint16_t ieob:1;
  1204. uint16_t ieot:1;
  1205. uint16_t bei:1;
  1206. uint16_t resvd3:5;
  1207. uint8_t re_type;
  1208. uint8_t resvd2;
  1209. };
  1210. struct __packed gsi_gci_tre {
  1211. uint64_t buffer_ptr:41;
  1212. uint64_t resvd1:7;
  1213. uint64_t buf_len:16;
  1214. uint64_t cookie:40;
  1215. uint64_t resvd2:8;
  1216. uint64_t re_type:8;
  1217. uint64_t resvd3:8;
  1218. };
  1219. #define GSI_XFER_COMPL_TYPE_GCI 0x28
  1220. struct __packed gsi_xfer_compl_evt {
  1221. union {
  1222. uint64_t xfer_ptr;
  1223. struct {
  1224. uint64_t cookie:40;
  1225. uint64_t resvd1:24;
  1226. };
  1227. };
  1228. uint16_t len;
  1229. uint8_t veid;
  1230. uint8_t code; /* see gsi_chan_evt */
  1231. uint16_t resvd;
  1232. uint8_t type;
  1233. uint8_t chid;
  1234. };
  1235. enum gsi_err_type {
  1236. GSI_ERR_TYPE_GLOB = 0x1,
  1237. GSI_ERR_TYPE_CHAN = 0x2,
  1238. GSI_ERR_TYPE_EVT = 0x3,
  1239. };
  1240. enum gsi_err_code {
  1241. GSI_INVALID_TRE_ERR = 0x1,
  1242. GSI_OUT_OF_BUFFERS_ERR = 0x2,
  1243. GSI_OUT_OF_RESOURCES_ERR = 0x3,
  1244. GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
  1245. GSI_EVT_RING_EMPTY_ERR = 0x5,
  1246. GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
  1247. GSI_HWO_1_ERR = 0x8
  1248. };
  1249. struct __packed gsi_log_err {
  1250. uint32_t arg3:4;
  1251. uint32_t arg2:4;
  1252. uint32_t arg1:4;
  1253. uint32_t code:4;
  1254. uint32_t resvd:3;
  1255. uint32_t virt_idx:5;
  1256. uint32_t err_type:4;
  1257. uint32_t ee:4;
  1258. };
  1259. enum gsi_ch_cmd_opcode {
  1260. GSI_CH_ALLOCATE = 0x0,
  1261. GSI_CH_START = 0x1,
  1262. GSI_CH_STOP = 0x2,
  1263. GSI_CH_RESET = 0x9,
  1264. GSI_CH_DE_ALLOC = 0xa,
  1265. GSI_CH_DB_STOP = 0xb,
  1266. };
  1267. enum gsi_evt_ch_cmd_opcode {
  1268. GSI_EVT_ALLOCATE = 0x0,
  1269. GSI_EVT_RESET = 0x9,
  1270. GSI_EVT_DE_ALLOC = 0xa,
  1271. };
  1272. enum gsi_generic_ee_cmd_opcode {
  1273. GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
  1274. GSI_GEN_EE_CMD_ALLOC_CHANNEL = 0x2,
  1275. GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL = 0x3,
  1276. GSI_GEN_EE_CMD_DISABLE_FLOW_CHANNEL = 0x4,
  1277. };
  1278. enum gsi_generic_ee_cmd_return_code {
  1279. GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1,
  1280. GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2,
  1281. GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3,
  1282. GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4,
  1283. GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5,
  1284. GSI_GEN_EE_CMD_RETURN_CODE_RETRY = 0x6,
  1285. GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES = 0x7,
  1286. };
  1287. extern struct gsi_ctx *gsi_ctx;
  1288. /**
  1289. * gsi_xfer_elem - Metadata about a single transfer
  1290. *
  1291. * @addr: physical address of buffer
  1292. * @len: size of buffer for GSI_XFER_ELEM_DATA:
  1293. * for outbound transfers this is the number of bytes to
  1294. * transfer.
  1295. * for inbound transfers, this is the maximum number of
  1296. * bytes the host expects from device in this transfer
  1297. *
  1298. * immediate command opcode for GSI_XFER_ELEM_IMME_CMD
  1299. * @flags: transfer flags, OR of all the applicable flags
  1300. *
  1301. * GSI_XFER_FLAG_BEI: Block event interrupt
  1302. * 1: Event generated by this ring element must not assert
  1303. * an interrupt to the host
  1304. * 0: Event generated by this ring element must assert an
  1305. * interrupt to the host
  1306. *
  1307. * GSI_XFER_FLAG_EOT: Interrupt on end of transfer
  1308. * 1: If an EOT condition is encountered when processing
  1309. * this ring element, an event is generated by the device
  1310. * with its completion code set to EOT.
  1311. * 0: If an EOT condition is encountered for this ring
  1312. * element, a completion event is not be generated by the
  1313. * device, unless IEOB is 1
  1314. *
  1315. * GSI_XFER_FLAG_EOB: Interrupt on end of block
  1316. * 1: Device notifies host after processing this ring element
  1317. * by sending a completion event
  1318. * 0: Completion event is not required after processing this
  1319. * ring element
  1320. *
  1321. * GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring
  1322. * elements in a TD
  1323. *
  1324. * @type: transfer type
  1325. *
  1326. * GSI_XFER_ELEM_DATA: for all data transfers
  1327. * GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
  1328. * GSI_XFER_ELEM_NOP: for event generation only
  1329. *
  1330. * @xfer_user_data: cookie used in xfer_cb
  1331. *
  1332. */
  1333. struct gsi_xfer_elem {
  1334. uint64_t addr;
  1335. uint16_t len;
  1336. uint16_t flags;
  1337. enum gsi_xfer_elem_type type;
  1338. void *xfer_user_data;
  1339. };
  1340. /**
  1341. * gsi_alloc_evt_ring - Peripheral should call this function to
  1342. * allocate an event ring
  1343. *
  1344. * @props: Event ring properties
  1345. * @dev_hdl: Client handle previously obtained from
  1346. * gsi_register_device
  1347. * @evt_ring_hdl: Handle populated by GSI, opaque to client
  1348. *
  1349. * This function can sleep
  1350. *
  1351. * @Return gsi_status
  1352. */
  1353. int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
  1354. unsigned long *evt_ring_hdl);
  1355. /**
  1356. * gsi_dealloc_evt_ring - Peripheral should call this function to
  1357. * de-allocate an event ring. There should not exist any active
  1358. * channels using this event ring
  1359. *
  1360. * @evt_ring_hdl: Client handle previously obtained from
  1361. * gsi_alloc_evt_ring
  1362. *
  1363. * This function can sleep
  1364. *
  1365. * @Return gsi_status
  1366. */
  1367. int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl);
  1368. /**
  1369. * gsi_alloc_channel - Peripheral should call this function to
  1370. * allocate a channel
  1371. *
  1372. * @props: Channel properties
  1373. * @dev_hdl: Client handle previously obtained from
  1374. * gsi_register_device
  1375. * @chan_hdl: Handle populated by GSI, opaque to client
  1376. *
  1377. * This function can sleep
  1378. *
  1379. * @Return gsi_status
  1380. */
  1381. int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
  1382. unsigned long *chan_hdl);
  1383. /**
  1384. * gsi_start_channel - Peripheral should call this function to
  1385. * start a channel i.e put into running state
  1386. *
  1387. * @chan_hdl: Client handle previously obtained from
  1388. * gsi_alloc_channel
  1389. *
  1390. * This function can sleep
  1391. *
  1392. * @Return gsi_status
  1393. */
  1394. int gsi_start_channel(unsigned long chan_hdl);
  1395. /**
  1396. * gsi_reset_channel - Peripheral should call this function to
  1397. * reset a channel to recover from error state
  1398. *
  1399. * @chan_hdl: Client handle previously obtained from
  1400. * gsi_alloc_channel
  1401. *
  1402. * This function can sleep
  1403. *
  1404. * @Return gsi_status
  1405. */
  1406. int gsi_reset_channel(unsigned long chan_hdl);
  1407. /**
  1408. * gsi_dealloc_channel - Peripheral should call this function to
  1409. * de-allocate a channel
  1410. *
  1411. * @chan_hdl: Client handle previously obtained from
  1412. * gsi_alloc_channel
  1413. *
  1414. * This function can sleep
  1415. *
  1416. * @Return gsi_status
  1417. */
  1418. int gsi_dealloc_channel(unsigned long chan_hdl);
  1419. /**
  1420. * gsi_poll_channel - Peripheral should call this function to query for
  1421. * completed transfer descriptors.
  1422. *
  1423. * @chan_hdl: Client handle previously obtained from
  1424. * gsi_alloc_channel
  1425. * @notify: Information about the completed transfer if any
  1426. *
  1427. * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
  1428. * completed)
  1429. */
  1430. int gsi_poll_channel(unsigned long chan_hdl,
  1431. struct gsi_chan_xfer_notify *notify);
  1432. /**
  1433. * gsi_config_channel_mode - Peripheral should call this function
  1434. * to configure the channel mode.
  1435. *
  1436. * @chan_hdl: Client handle previously obtained from
  1437. * gsi_alloc_channel
  1438. * @mode: Mode to move the channel into
  1439. *
  1440. * @Return gsi_status
  1441. */
  1442. int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode);
  1443. /**
  1444. * gsi_queue_xfer - Peripheral should call this function
  1445. * to queue transfers on the given channel
  1446. *
  1447. * @chan_hdl: Client handle previously obtained from
  1448. * gsi_alloc_channel
  1449. * @num_xfers: Number of transfer in the array @ xfer
  1450. * @xfer: Array of num_xfers transfer descriptors
  1451. * @ring_db: If true, tell HW about these queued xfers
  1452. * If false, do not notify HW at this time
  1453. *
  1454. * @Return gsi_status
  1455. */
  1456. int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
  1457. struct gsi_xfer_elem *xfer, bool ring_db);
  1458. void gsi_debugfs_init(void);
  1459. uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr);
  1460. void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used);
  1461. /**
  1462. * gsi_register_device - Peripheral should call this function to
  1463. * register itself with GSI before invoking any other APIs
  1464. *
  1465. * @props: Peripheral properties
  1466. * @dev_hdl: Handle populated by GSI, opaque to client
  1467. *
  1468. * @Return -GSI_STATUS_AGAIN if request should be re-tried later
  1469. * other error codes for failure
  1470. */
  1471. int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl);
  1472. /**
  1473. * gsi_complete_clk_grant - Peripheral should call this function to
  1474. * grant the clock resource requested by GSI previously that could not
  1475. * be granted synchronously. GSI will release the clock resource using
  1476. * the rel_clk_cb when appropriate
  1477. *
  1478. * @dev_hdl: Client handle previously obtained from
  1479. * gsi_register_device
  1480. *
  1481. * @Return gsi_status
  1482. */
  1483. int gsi_complete_clk_grant(unsigned long dev_hdl);
  1484. /**
  1485. * gsi_write_device_scratch - Peripheral should call this function to
  1486. * write to the EE scratch area
  1487. *
  1488. * @dev_hdl: Client handle previously obtained from
  1489. * gsi_register_device
  1490. * @val: Value to write
  1491. *
  1492. * @Return gsi_status
  1493. */
  1494. int gsi_write_device_scratch(unsigned long dev_hdl,
  1495. struct gsi_device_scratch *val);
  1496. /**
  1497. * gsi_deregister_device - Peripheral should call this function to
  1498. * de-register itself with GSI
  1499. *
  1500. * @dev_hdl: Client handle previously obtained from
  1501. * gsi_register_device
  1502. * @force: When set to true, cleanup is performed even if there
  1503. * are in use resources like channels, event rings, etc.
  1504. * this would be used after GSI reset to recover from some
  1505. * fatal error
  1506. * When set to false, there must not exist any allocated
  1507. * channels and event rings.
  1508. *
  1509. * @Return gsi_status
  1510. */
  1511. int gsi_deregister_device(unsigned long dev_hdl, bool force);
  1512. /**
  1513. * gsi_write_evt_ring_scratch - Peripheral should call this function to
  1514. * write to the scratch area of the event ring context
  1515. *
  1516. * @evt_ring_hdl: Client handle previously obtained from
  1517. * gsi_alloc_evt_ring
  1518. * @val: Value to write
  1519. *
  1520. * @Return gsi_status
  1521. */
  1522. int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
  1523. union __packed gsi_evt_scratch val);
  1524. /**
  1525. * gsi_query_evt_ring_db_addr - Peripheral should call this function to
  1526. * query the physical addresses of the event ring doorbell registers
  1527. *
  1528. * @evt_ring_hdl: Client handle previously obtained from
  1529. * gsi_alloc_evt_ring
  1530. * @db_addr_wp_lsb: Physical address of doorbell register where the 32
  1531. * LSBs of the doorbell value should be written
  1532. * @db_addr_wp_msb: Physical address of doorbell register where the 32
  1533. * MSBs of the doorbell value should be written
  1534. *
  1535. * @Return gsi_status
  1536. */
  1537. int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
  1538. uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
  1539. /**
  1540. * gsi_ring_evt_ring_db - Peripheral should call this function for
  1541. * ringing the event ring doorbell with given value
  1542. *
  1543. * @evt_ring_hdl: Client handle previously obtained from
  1544. * gsi_alloc_evt_ring
  1545. * @value: The value to be used for ringing the doorbell
  1546. *
  1547. * @Return gsi_status
  1548. */
  1549. int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value);
  1550. /**
  1551. * gsi_ring_ch_ring_db - Peripheral should call this function for
  1552. * ringing the channel ring doorbell with given value
  1553. *
  1554. * @chan_hdl: Client handle previously obtained from
  1555. * gsi_alloc_channel
  1556. * @value: The value to be used for ringing the doorbell
  1557. *
  1558. * @Return gsi_status
  1559. */
  1560. int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value);
  1561. /**
  1562. * gsi_reset_evt_ring - Peripheral should call this function to
  1563. * reset an event ring to recover from error state
  1564. *
  1565. * @evt_ring_hdl: Client handle previously obtained from
  1566. * gsi_alloc_evt_ring
  1567. *
  1568. * This function can sleep
  1569. *
  1570. * @Return gsi_status
  1571. */
  1572. int gsi_reset_evt_ring(unsigned long evt_ring_hdl);
  1573. /**
  1574. * gsi_get_evt_ring_cfg - This function returns the current config
  1575. * of the specified event ring
  1576. *
  1577. * @evt_ring_hdl: Client handle previously obtained from
  1578. * gsi_alloc_evt_ring
  1579. * @props: where to copy properties to
  1580. * @scr: where to copy scratch info to
  1581. *
  1582. * @Return gsi_status
  1583. */
  1584. int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
  1585. struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
  1586. /**
  1587. * gsi_set_evt_ring_cfg - This function applies the supplied config
  1588. * to the specified event ring.
  1589. *
  1590. * exclusive property of the event ring cannot be changed after
  1591. * gsi_alloc_evt_ring
  1592. *
  1593. * @evt_ring_hdl: Client handle previously obtained from
  1594. * gsi_alloc_evt_ring
  1595. * @props: the properties to apply
  1596. * @scr: the scratch info to apply
  1597. *
  1598. * @Return gsi_status
  1599. */
  1600. int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
  1601. struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
  1602. /**
  1603. * gsi_write_channel_scratch - Peripheral should call this function to
  1604. * write to the scratch area of the channel context
  1605. *
  1606. * @chan_hdl: Client handle previously obtained from
  1607. * gsi_alloc_channel
  1608. * @val: Value to write
  1609. *
  1610. * @Return gsi_status
  1611. */
  1612. int gsi_write_channel_scratch(unsigned long chan_hdl,
  1613. union __packed gsi_channel_scratch val);
  1614. /**
  1615. * gsi_write_channel_scratch3_reg - Peripheral should call this function to
  1616. * write to the scratch3 reg area of the channel context
  1617. *
  1618. * @chan_hdl: Client handle previously obtained from
  1619. * gsi_alloc_channel
  1620. * @val: Value to write
  1621. *
  1622. * @Return gsi_status
  1623. */
  1624. int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
  1625. union __packed gsi_wdi_channel_scratch3_reg val);
  1626. /**
  1627. * gsi_write_channel_scratch2_reg - Peripheral should call this function to
  1628. * write to the scratch2 reg area of the channel context
  1629. *
  1630. * @chan_hdl: Client handle previously obtained from
  1631. * gsi_alloc_channel
  1632. * @val: Value to write
  1633. *
  1634. * @Return gsi_status
  1635. */
  1636. int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
  1637. union __packed gsi_wdi2_channel_scratch2_reg val);
  1638. /**
  1639. * gsi_write_wdi3_channel_scratch2_reg - Peripheral should call this function
  1640. * to write to the WDI3 scratch 3 register area of the channel context
  1641. *
  1642. * @chan_hdl: Client handle previously obtained from
  1643. * gsi_alloc_channel
  1644. * @val: Read value
  1645. *
  1646. * @Return gsi_status
  1647. */
  1648. int gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
  1649. union __packed gsi_wdi3_channel_scratch2_reg val);
  1650. /**
  1651. * gsi_read_channel_scratch - Peripheral should call this function to
  1652. * read to the scratch area of the channel context
  1653. *
  1654. * @chan_hdl: Client handle previously obtained from
  1655. * gsi_alloc_channel
  1656. * @val: Read value
  1657. *
  1658. * @Return gsi_status
  1659. */
  1660. int gsi_read_channel_scratch(unsigned long chan_hdl,
  1661. union __packed gsi_channel_scratch *val);
  1662. /**
  1663. * gsi_read_wdi3_channel_scratch2_reg - Peripheral should call this function to
  1664. * read to the WDI3 scratch 2 register area of the channel context
  1665. *
  1666. * @chan_hdl: Client handle previously obtained from
  1667. * gsi_alloc_channel
  1668. * @val: Read value
  1669. *
  1670. * @Return gsi_status
  1671. */
  1672. int gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
  1673. union __packed gsi_wdi3_channel_scratch2_reg *val);
  1674. /*
  1675. * gsi_pending_irq_type - Peripheral should call this function to
  1676. * check if there is any pending irq
  1677. *
  1678. * This function can sleep
  1679. *
  1680. * @Return gsi_irq_type
  1681. */
  1682. int gsi_pending_irq_type(void);
  1683. /**
  1684. * gsi_update_mhi_channel_scratch - MHI Peripheral should call this
  1685. * function to update the scratch area of the channel context. Updating
  1686. * will be by read-modify-write method, so non SWI fields will not be
  1687. * affected
  1688. *
  1689. * @chan_hdl: Client handle previously obtained from
  1690. * gsi_alloc_channel
  1691. * @mscr: MHI Channel Scratch value
  1692. *
  1693. * @Return gsi_status
  1694. */
  1695. int gsi_update_mhi_channel_scratch(unsigned long chan_hdl,
  1696. struct __packed gsi_mhi_channel_scratch mscr);
  1697. /**
  1698. * gsi_stop_channel - Peripheral should call this function to
  1699. * stop a channel. Stop will happen on a packet boundary
  1700. *
  1701. * @chan_hdl: Client handle previously obtained from
  1702. * gsi_alloc_channel
  1703. *
  1704. * This function can sleep
  1705. *
  1706. * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
  1707. * other error codes for failure
  1708. */
  1709. int gsi_stop_channel(unsigned long chan_hdl);
  1710. /**
  1711. * gsi_stop_db_channel - Peripheral should call this function to
  1712. * stop a channel when all transfer elements till the doorbell
  1713. * have been processed
  1714. *
  1715. * @chan_hdl: Client handle previously obtained from
  1716. * gsi_alloc_channel
  1717. *
  1718. * This function can sleep
  1719. *
  1720. * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
  1721. * other error codes for failure
  1722. */
  1723. int gsi_stop_db_channel(unsigned long chan_hdl);
  1724. /**
  1725. * gsi_query_channel_db_addr - Peripheral should call this function to
  1726. * query the physical addresses of the channel doorbell registers
  1727. *
  1728. * @chan_hdl: Client handle previously obtained from
  1729. * gsi_alloc_channel
  1730. * @db_addr_wp_lsb: Physical address of doorbell register where the 32
  1731. * LSBs of the doorbell value should be written
  1732. * @db_addr_wp_msb: Physical address of doorbell register where the 32
  1733. * MSBs of the doorbell value should be written
  1734. *
  1735. * @Return gsi_status
  1736. */
  1737. int gsi_query_channel_db_addr(unsigned long chan_hdl,
  1738. uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
  1739. /**
  1740. * gsi_query_channel_info - Peripheral can call this function to query the
  1741. * channel and associated event ring (if any) status.
  1742. *
  1743. * @chan_hdl: Client handle previously obtained from
  1744. * gsi_alloc_channel
  1745. * @info: Where to read the values into
  1746. *
  1747. * @Return gsi_status
  1748. */
  1749. int gsi_query_channel_info(unsigned long chan_hdl,
  1750. struct gsi_chan_info *info);
  1751. /**
  1752. * gsi_is_channel_empty - Peripheral can call this function to query if
  1753. * the channel is empty. This is only applicable to GPI. "Empty" means
  1754. * GSI has consumed all descriptors for a TO_GSI channel and SW has
  1755. * processed all completed descriptors for a FROM_GSI channel.
  1756. *
  1757. * @chan_hdl: Client handle previously obtained from gsi_alloc_channel
  1758. * @is_empty: set by GSI based on channel emptiness
  1759. *
  1760. * @Return gsi_status
  1761. */
  1762. int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);
  1763. /**
  1764. * gsi_get_channel_cfg - This function returns the current config
  1765. * of the specified channel
  1766. *
  1767. * @chan_hdl: Client handle previously obtained from
  1768. * gsi_alloc_channel
  1769. * @props: where to copy properties to
  1770. * @scr: where to copy scratch info to
  1771. *
  1772. * @Return gsi_status
  1773. */
  1774. int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
  1775. union gsi_channel_scratch *scr);
  1776. /**
  1777. * gsi_set_channel_cfg - This function applies the supplied config
  1778. * to the specified channel
  1779. *
  1780. * ch_id and evt_ring_hdl of the channel cannot be changed after
  1781. * gsi_alloc_channel
  1782. *
  1783. * @chan_hdl: Client handle previously obtained from
  1784. * gsi_alloc_channel
  1785. * @props: the properties to apply
  1786. * @scr: the scratch info to apply
  1787. *
  1788. * @Return gsi_status
  1789. */
  1790. int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
  1791. union gsi_channel_scratch *scr);
  1792. /**
  1793. * gsi_poll_n_channel - Peripheral should call this function to query for
  1794. * completed transfer descriptors.
  1795. *
  1796. * @chan_hdl: Client handle previously obtained from
  1797. * gsi_alloc_channel
  1798. * @notify: Information about the completed transfer if any
  1799. * @expected_num: Number of descriptor we want to poll each time.
  1800. * @actual_num: Actual number of descriptor we polled successfully.
  1801. *
  1802. * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
  1803. * completed)
  1804. */
  1805. int gsi_poll_n_channel(unsigned long chan_hdl,
  1806. struct gsi_chan_xfer_notify *notify,
  1807. int expected_num, int *actual_num);
  1808. /**
  1809. * gsi_start_xfer - Peripheral should call this function to
  1810. * inform HW about queued xfers
  1811. *
  1812. * @chan_hdl: Client handle previously obtained from
  1813. * gsi_alloc_channel
  1814. *
  1815. * @Return gsi_status
  1816. */
  1817. int gsi_start_xfer(unsigned long chan_hdl);
  1818. /**
  1819. * gsi_configure_regs - Peripheral should call this function
  1820. * to configure the GSI registers before/after the FW is
  1821. * loaded but before it is enabled.
  1822. *
  1823. * @per_base_addr: Base address of the peripheral using GSI
  1824. * @ver: GSI core version
  1825. *
  1826. * @Return gsi_status
  1827. */
  1828. int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver);
  1829. /**
  1830. * gsi_enable_fw - Peripheral should call this function
  1831. * to enable the GSI FW after the FW has been loaded to the SRAM.
  1832. *
  1833. * @gsi_base_addr: Base address of GSI register space
  1834. * @gsi_size: Mapping size of the GSI register space
  1835. * @ver: GSI core version
  1836. * @Return gsi_status
  1837. */
  1838. int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver);
  1839. /**
  1840. * gsi_get_inst_ram_offset_and_size - Peripheral should call this function
  1841. * to get instruction RAM base address offset and size. Peripheral typically
  1842. * uses this info to load GSI FW into the IRAM.
  1843. *
  1844. * @base_offset:[OUT] - IRAM base offset address
  1845. * @size: [OUT] - IRAM size
  1846. * @ver: GSI core version
  1847. * @Return none
  1848. */
  1849. void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
  1850. unsigned long *size, enum gsi_ver ver);
  1851. /**
  1852. * gsi_halt_channel_ee - Peripheral should call this function
  1853. * to stop other EE's channel. This is usually used in SSR clean
  1854. *
  1855. * @chan_idx: Virtual channel index
  1856. * @ee: EE
  1857. * @code: [out] response code for operation
  1858. * @Return gsi_status
  1859. */
  1860. int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
  1861. /**
  1862. * gsi_wdi3_write_evt_ring_db - write event ring doorbell address
  1863. *
  1864. * @chan_hdl: gsi channel handle
  1865. * @Return gsi_status
  1866. */
  1867. void gsi_wdi3_write_evt_ring_db(unsigned long chan_hdl, uint32_t db_addr_low,
  1868. uint32_t db_addr_high);
  1869. /**
  1870. * gsi_wdi3_dump_register - dump wdi3 related gsi registers
  1871. *
  1872. * @chan_hdl: gsi channel handle
  1873. */
  1874. void gsi_wdi3_dump_register(unsigned long chan_hdl);
  1875. /**
  1876. * gsi_map_base - Peripheral should call this function to configure
  1877. * access to the GSI registers.
  1878. * @gsi_base_addr: Base address of GSI register space
  1879. * @gsi_size: Mapping size of the GSI register space
  1880. *
  1881. * @Return gsi_status
  1882. */
  1883. int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size);
  1884. /**
  1885. * gsi_unmap_base - Peripheral should call this function to undo the
  1886. * effects of gsi_map_base
  1887. *
  1888. * @Return gsi_status
  1889. */
  1890. int gsi_unmap_base(void);
  1891. /**
  1892. * gsi_map_virtual_ch_to_per_ep - Peripheral should call this function
  1893. * to configure each GSI virtual channel with the per endpoint index.
  1894. *
  1895. * @ee: The ee to be used
  1896. * @chan_num: The channel to be used
  1897. * @per_ep_index: value to assign
  1898. *
  1899. * @Return gsi_status
  1900. */
  1901. int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index);
  1902. /**
  1903. * gsi_alloc_channel_ee - Peripheral should call this function
  1904. * to alloc other EE's channel. This is usually done in bootup to allocate all
  1905. * chnnels.
  1906. *
  1907. * @chan_idx: Virtual channel index
  1908. * @ee: EE
  1909. * @code: [out] response code for operation
  1910. * @Return gsi_status
  1911. */
  1912. int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
  1913. /**
  1914. * gsi_enable_flow_control_ee - Peripheral should call this function
  1915. * to enable flow control other EE's channel. This is usually done in USB
  1916. * connent and SSR scenarios.
  1917. *
  1918. * @chan_idx: Virtual channel index
  1919. * @ee: EE
  1920. * @code: [out] response code for operation
  1921. * @Return gsi_status
  1922. */
  1923. int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
  1924. int *code);
  1925. /*
  1926. * Here is a typical sequence of calls
  1927. *
  1928. * gsi_register_device
  1929. *
  1930. * gsi_write_device_scratch (if the protocol needs this)
  1931. *
  1932. * gsi_alloc_evt_ring (for as many event rings as needed)
  1933. * gsi_write_evt_ring_scratch
  1934. *
  1935. * gsi_alloc_channel (for as many channels as needed; channels can have
  1936. * no event ring, an exclusive event ring or a shared event ring)
  1937. * gsi_write_channel_scratch
  1938. * gsi_read_channel_scratch
  1939. * gsi_start_channel
  1940. * gsi_queue_xfer/gsi_start_xfer
  1941. * gsi_config_channel_mode/gsi_poll_channel (if clients wants to poll on
  1942. * xfer completions)
  1943. * gsi_stop_db_channel/gsi_stop_channel
  1944. *
  1945. * gsi_dealloc_channel
  1946. *
  1947. * gsi_dealloc_evt_ring
  1948. *
  1949. * gsi_deregister_device
  1950. *
  1951. */
  1952. #endif