gsi.h
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  4. *
  5. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  6. */
  7. #ifndef GSI_H
  8. #define GSI_H
  9. #include <linux/device.h>
  10. #include <linux/types.h>
  11. #include <linux/completion.h>
  12. #include <linux/mutex.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/msm_gsi.h>
  15. #include <linux/errno.h>
  16. #include <linux/ipc_logging.h>
  17. #include <linux/iommu.h>
  18. #include <linux/msi.h>
  19. /*
  20. * The following is for adding code (i.e., for EMULATION) not found on x86.
  21. */
  22. #if defined(CONFIG_IPA_EMULATION)
  23. # include "gsi_emulation_stubs.h"
  24. #endif
  25. #define GSI_ASSERT() \
  26. BUG()
  27. #define GSI_CHAN_MAX 36
  28. #define GSI_EVT_RING_MAX 31
  29. #define GSI_NO_EVT_ERINDEX 255
  30. #define GSI_ISR_CACHE_MAX 20
  31. #define MAX_CHANNELS_SHARING_EVENT_RING 2
  32. #define GSI_IPC_LOGGING(buf, fmt, args...) \
  33. do { \
  34. if (buf) \
  35. ipc_log_string((buf), fmt, __func__, __LINE__, \
  36. ## args); \
  37. } while (0)
  38. #define GSIDBG(fmt, args...) \
  39. do { \
  40. dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
  41. ## args);\
  42. if (gsi_ctx) { \
  43. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
  44. "%s:%d " fmt, ## args); \
  45. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
  46. "%s:%d " fmt, ## args); \
  47. } \
  48. } while (0)
  49. #define GSIDBG_LOW(fmt, args...) \
  50. do { \
  51. dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
  52. ## args);\
  53. if (gsi_ctx) { \
  54. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
  55. "%s:%d " fmt, ## args); \
  56. } \
  57. } while (0)
  58. #define GSIERR(fmt, args...) \
  59. do { \
  60. dev_err(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
  61. ## args);\
  62. if (gsi_ctx) { \
  63. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
  64. "%s:%d " fmt, ## args); \
  65. GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
  66. "%s:%d " fmt, ## args); \
  67. } \
  68. } while (0)
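/*
 * Illustrative usage sketch (not part of the original header, excluded from
 * compilation): the macros above take a printf-style format plus arguments;
 * the "%s:%d " function/line prefix is added by the macros themselves.
 * chan_id and state are hypothetical locals used only for this example.
 */
#if 0
static void example_gsi_logging(uint8_t chan_id, int state)
{
	GSIDBG("allocating channel %u\n", chan_id);
	GSIERR("unexpected state %d on channel %u\n", state, chan_id);
}
#endif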
  69. #define GSI_IPC_LOG_PAGES 50
  70. #define GSI_MAX_NUM_MSI 2
  71. enum gsi_ver {
  72. GSI_VER_ERR = 0,
  73. GSI_VER_1_0 = 1,
  74. GSI_VER_1_2 = 2,
  75. GSI_VER_1_3 = 3,
  76. GSI_VER_2_0 = 4,
  77. GSI_VER_2_2 = 5,
  78. GSI_VER_2_5 = 6,
  79. GSI_VER_2_7 = 7,
  80. GSI_VER_2_9 = 8,
  81. GSI_VER_2_11 = 9,
  82. GSI_VER_3_0 = 10,
  83. GSI_VER_5_5 = 11,
  84. GSI_VER_MAX,
  85. };
  86. enum gsi_status {
  87. GSI_STATUS_SUCCESS = 0,
  88. GSI_STATUS_ERROR = 1,
  89. GSI_STATUS_RING_INSUFFICIENT_SPACE = 2,
  90. GSI_STATUS_RING_EMPTY = 3,
  91. GSI_STATUS_RES_ALLOC_FAILURE = 4,
  92. GSI_STATUS_BAD_STATE = 5,
  93. GSI_STATUS_INVALID_PARAMS = 6,
  94. GSI_STATUS_UNSUPPORTED_OP = 7,
  95. GSI_STATUS_NODEV = 8,
  96. GSI_STATUS_POLL_EMPTY = 9,
  97. GSI_STATUS_EVT_RING_INCOMPATIBLE = 10,
  98. GSI_STATUS_TIMED_OUT = 11,
  99. GSI_STATUS_AGAIN = 12,
  100. GSI_STATUS_PENDING_IRQ = 13,
  101. };
  102. enum gsi_intr_type {
  103. GSI_INTR_MSI = 0x0,
  104. GSI_INTR_IRQ = 0x1
  105. };
  106. enum gsi_evt_err {
  107. GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0,
  108. GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1,
  109. GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2,
  110. GSI_EVT_EVT_RING_EMPTY_ERR = 0x3,
  111. };
  112. /**
  113. * gsi_evt_err_notify - event ring error callback info
  114. *
  115. * @user_data: cookie supplied in gsi_alloc_evt_ring
  116. * @evt_id: type of error
  117. * @err_desc: more info about the error
  118. *
  119. */
  120. struct gsi_evt_err_notify {
  121. void *user_data;
  122. enum gsi_evt_err evt_id;
  123. uint16_t err_desc;
  124. };
  125. enum gsi_evt_chtype {
  126. GSI_EVT_CHTYPE_MHI_EV = 0x0,
  127. GSI_EVT_CHTYPE_XHCI_EV = 0x1,
  128. GSI_EVT_CHTYPE_GPI_EV = 0x2,
  129. GSI_EVT_CHTYPE_XDCI_EV = 0x3,
  130. GSI_EVT_CHTYPE_WDI2_EV = 0x4,
  131. GSI_EVT_CHTYPE_GCI_EV = 0x5,
  132. GSI_EVT_CHTYPE_WDI3_EV = 0x6,
  133. GSI_EVT_CHTYPE_MHIP_EV = 0x7,
  134. GSI_EVT_CHTYPE_AQC_EV = 0x8,
  135. GSI_EVT_CHTYPE_11AD_EV = 0x9,
  136. GSI_EVT_CHTYPE_RTK_EV = 0xC,
  137. GSI_EVT_CHTYPE_NTN_EV = 0xD,
  138. };
  139. enum gsi_evt_ring_elem_size {
  140. GSI_EVT_RING_RE_SIZE_4B = 4,
  141. GSI_EVT_RING_RE_SIZE_8B = 8,
  142. GSI_EVT_RING_RE_SIZE_16B = 16,
  143. GSI_EVT_RING_RE_SIZE_32B = 32,
  144. };
  145. /**
  146. * gsi_evt_ring_props - Event ring related properties
  147. *
  148. * @intf: interface type (of the associated channel)
  149. * @intr: interrupt type
  150. * @re_size: size of event ring element
  151. * @ring_len: length of ring in bytes (must be integral multiple of
  152. * re_size)
  153. * @ring_base_addr: physical base address of ring. Address must be aligned to
  154. * ring_len rounded to power of two
  155. * @ring_base_vaddr: virtual base address of ring (set to NULL when not
  156. * applicable)
  157. * @int_modt: cycles base interrupt moderation (32KHz clock)
  158. * @int_modc: interrupt moderation packet counter
  159. * @intvec: write data for MSI write
  160. * @msi_irq: MSI irq number
  161. * @msi_addr: MSI address, APSS_GICA_SETSPI_NSR reg address
  162. * @msi_clear_addr: MSI address, APSS_GICA_CLRSPI_NSR reg address
  163. * @rp_update_addr: physical address to which event read pointer should be
  164. * written on every event generation. must be set to 0 when
  165. * no update is desired
  166. * @rp_update_vaddr: virtual address of event ring read pointer (set to NULL
  167. * when not applicable)
  168. * @exclusive: if true, only one GSI channel can be associated with this
  169. * event ring. if false, the event ring can be shared among
  170. * multiple GSI channels but in that case no polling
  171. * (GSI_CHAN_MODE_POLL) is supported on any of those channels
  172. * @err_cb: error notification callback
  173. * @user_data: cookie used for error notifications
  174. * @evchid_valid: is evchid valid?
  175. * @evchid: the event ID that is being specifically requested (this is
  176. * relevant for MHI where doorbell routing requires ERs to be
  177. * physically contiguous)
  178. * @gsi_read_event_ring_rp: function reads the value of the event ring RP.
  179. */
  180. struct gsi_evt_ring_props {
  181. enum gsi_evt_chtype intf;
  182. enum gsi_intr_type intr;
  183. enum gsi_evt_ring_elem_size re_size;
  184. uint32_t ring_len;
  185. uint64_t ring_base_addr;
  186. void *ring_base_vaddr;
  187. uint16_t int_modt;
  188. uint8_t int_modc;
  189. uint32_t intvec;
  190. uint32_t msi_irq;
  191. uint64_t msi_addr;
  192. uint64_t msi_addr_iore_mapped;
  193. uint64_t msi_clear_addr;
  194. uint64_t rp_update_addr;
  195. void *rp_update_vaddr;
  196. bool exclusive;
  197. void (*err_cb)(struct gsi_evt_err_notify *notify);
  198. void *user_data;
  199. bool evchid_valid;
  200. uint8_t evchid;
  201. uint64_t (*gsi_read_event_ring_rp)(struct gsi_evt_ring_props *props,
  202. uint8_t id, int ee);
  203. };
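/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation): filling a minimal gsi_evt_ring_props for a GPI event ring.
 * The ring size, moderation values and the ring_pa/ring_va/my_evt_err_cb
 * names are assumptions for the example; the populated structure would then
 * be passed to the event ring allocation API (gsi_alloc_evt_ring) referenced
 * in the callback documentation above.
 */
#if 0
static void example_fill_evt_ring_props(struct gsi_evt_ring_props *p,
					uint64_t ring_pa, void *ring_va,
					void (*my_evt_err_cb)(struct gsi_evt_err_notify *notify),
					void *priv)
{
	memset(p, 0, sizeof(*p));
	p->intf = GSI_EVT_CHTYPE_GPI_EV;		/* event ring for a GPI channel */
	p->intr = GSI_INTR_IRQ;				/* line interrupt rather than MSI */
	p->re_size = GSI_EVT_RING_RE_SIZE_16B;
	p->ring_len = 256 * GSI_EVT_RING_RE_SIZE_16B;	/* integral multiple of re_size */
	p->ring_base_addr = ring_pa;			/* physical, suitably aligned */
	p->ring_base_vaddr = ring_va;
	p->int_modt = 32;				/* moderation in 32 kHz cycles */
	p->int_modc = 1;
	p->rp_update_addr = 0;				/* no RP mirroring */
	p->exclusive = true;				/* single channel; polling allowed */
	p->err_cb = my_evt_err_cb;
	p->user_data = priv;
}
#endif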
  204. enum gsi_chan_mode {
  205. GSI_CHAN_MODE_CALLBACK = 0x0,
  206. GSI_CHAN_MODE_POLL = 0x1,
  207. };
  208. enum gsi_chan_prot {
  209. GSI_CHAN_PROT_MHI = 0x0,
  210. GSI_CHAN_PROT_XHCI = 0x1,
  211. GSI_CHAN_PROT_GPI = 0x2,
  212. GSI_CHAN_PROT_XDCI = 0x3,
  213. GSI_CHAN_PROT_WDI2 = 0x4,
  214. GSI_CHAN_PROT_GCI = 0x5,
  215. GSI_CHAN_PROT_WDI3 = 0x6,
  216. GSI_CHAN_PROT_MHIP = 0x7,
  217. GSI_CHAN_PROT_AQC = 0x8,
  218. GSI_CHAN_PROT_11AD = 0x9,
  219. GSI_CHAN_PROT_MHIC = 0xA,
  220. GSI_CHAN_PROT_QDSS = 0xB,
  221. GSI_CHAN_PROT_RTK = 0xC,
  222. GSI_CHAN_PROT_NTN = 0xD,
  223. };
  224. enum gsi_max_prefetch {
  225. GSI_ONE_PREFETCH_SEG = 0x0,
  226. GSI_TWO_PREFETCH_SEG = 0x1
  227. };
  228. enum gsi_per_evt {
  229. GSI_PER_EVT_GLOB_ERROR,
  230. GSI_PER_EVT_GLOB_GP1,
  231. GSI_PER_EVT_GLOB_GP2,
  232. GSI_PER_EVT_GLOB_GP3,
  233. GSI_PER_EVT_GENERAL_BREAK_POINT,
  234. GSI_PER_EVT_GENERAL_BUS_ERROR,
  235. GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW,
  236. GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW,
  237. };
  238. /**
  239. * gsi_per_notify - Peripheral callback info
  240. *
  241. * @user_data: cookie supplied in gsi_register_device
  242. * @evt_id: type of notification
  243. * @err_desc: error related information
  244. *
  245. */
  246. struct gsi_per_notify {
  247. void *user_data;
  248. enum gsi_per_evt evt_id;
  249. union {
  250. uint16_t err_desc;
  251. } data;
  252. };
  253. /**
  254. * gsi_per_props - Peripheral related properties
  255. *
  256. * @ver: GSI core version
  257. * @ee: EE where this driver and peripheral driver runs
  258. * @intr: control interrupt type
  259. * @intvec: write data for MSI write
  260. * @msi_addr: MSI address
  261. * @irq: IRQ number
  262. * @phys_addr: physical address of GSI block
  263. * @size: register size of GSI block
  264. * @emulator_intcntrlr_addr: the location of emulator's interrupt control block
  265. * @emulator_intcntrlr_size: the size of emulator_intcntrlr_addr
  266. * @emulator_intcntrlr_client_isr: client's isr. Called by the emulator's isr
  267. * @mhi_er_id_limits_valid: valid flag for mhi_er_id_limits
  268. * @mhi_er_id_limits: MHI event ring start and end ids
  269. * @notify_cb: general notification callback
  270. * @req_clk_cb: callback to request peripheral clock
  271. * granted should be set to true if the request is completed
  272. * synchronously, false otherwise (the peripheral needs
  273. * to call gsi_complete_clk_grant later when the request is
  274. * completed).
  275. * If this callback is not provided, GSI assumes the
  276. * peripheral is clocked at all times
  277. * @rel_clk_cb: callback to release peripheral clock
  278. * @user_data: cookie used for notifications
  279. * @clk_status_cb: callback to update the current msm bus clock vote
  280. * @enable_clk_bug_on: enable IPA clock for dump saving before assert
  281. * @skip_ieob_mask_wa: flag for skipping ieob_mask_wa
  282. * All the callbacks are in interrupt context
  283. * @tx_poll: propagate to relevant gsi channels that tx polling feature is on
  284. *
  285. */
  286. struct gsi_per_props {
  287. enum gsi_ver ver;
  288. unsigned int ee;
  289. enum gsi_intr_type intr;
  290. uint32_t intvec;
  291. uint64_t msi_addr;
  292. unsigned int irq;
  293. phys_addr_t phys_addr;
  294. unsigned long size;
  295. phys_addr_t emulator_intcntrlr_addr;
  296. unsigned long emulator_intcntrlr_size;
  297. irq_handler_t emulator_intcntrlr_client_isr;
  298. bool mhi_er_id_limits_valid;
  299. uint32_t mhi_er_id_limits[2];
  300. void (*notify_cb)(struct gsi_per_notify *notify);
  301. void (*req_clk_cb)(void *user_data, bool *granted);
  302. int (*rel_clk_cb)(void *user_data);
  303. void *user_data;
  304. int (*clk_status_cb)(void);
  305. void (*enable_clk_bug_on)(void);
  306. bool skip_ieob_mask_wa;
  307. bool tx_poll;
  308. };
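/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation): a minimal gsi_per_props for an EE that uses a line IRQ and is
 * always clocked, so req_clk_cb/rel_clk_cb are left NULL. The version value
 * and the example_* callback name are assumptions for the example.
 */
#if 0
static void example_per_notify_cb(struct gsi_per_notify *notify)
{
	if (notify->evt_id == GSI_PER_EVT_GLOB_ERROR)
		GSIERR("GSI global error, err_desc=0x%x\n", notify->data.err_desc);
}

static void example_fill_per_props(struct gsi_per_props *p, phys_addr_t base,
				   unsigned long size, unsigned int irq,
				   void *priv)
{
	memset(p, 0, sizeof(*p));
	p->ver = GSI_VER_2_5;			/* assumed GSI core version */
	p->ee = 0;				/* EE this driver runs on */
	p->intr = GSI_INTR_IRQ;
	p->irq = irq;
	p->phys_addr = base;			/* GSI register block */
	p->size = size;
	p->notify_cb = example_per_notify_cb;
	p->req_clk_cb = NULL;			/* peripheral assumed always clocked */
	p->rel_clk_cb = NULL;
	p->user_data = priv;
}
#endif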
  309. enum gsi_chan_evt {
  310. GSI_CHAN_EVT_INVALID = 0x0,
  311. GSI_CHAN_EVT_SUCCESS = 0x1,
  312. GSI_CHAN_EVT_EOT = 0x2,
  313. GSI_CHAN_EVT_OVERFLOW = 0x3,
  314. GSI_CHAN_EVT_EOB = 0x4,
  315. GSI_CHAN_EVT_OOB = 0x5,
  316. GSI_CHAN_EVT_DB_MODE = 0x6,
  317. GSI_CHAN_EVT_UNDEFINED = 0x10,
  318. GSI_CHAN_EVT_RE_ERROR = 0x11,
  319. };
  320. /**
  321. * gsi_chan_xfer_veid - Virtual Channel ID
  322. *
  323. * @GSI_VEID_0: transfer completed for VEID 0
  324. * @GSI_VEID_1: transfer completed for VEID 1
  325. * @GSI_VEID_2: transfer completed for VEID 2
  326. * @GSI_VEID_3: transfer completed for VEID 3
  327. * @GSI_VEID_4: transfer completed for VEID 4
  328. * @GSI_VEID_5: transfer completed for VEID 5
  329. * @GSI_VEID_6: transfer completed for VEID 6
  330. * @GSI_VEID_7: transfer completed for VEID 7
  331. * @GSI_VEID_8: transfer completed for VEID 8
  332. * @GSI_VEID_9: transfer completed for VEID 9
  333. * @GSI_VEID_10: transfer completed for VEID 10
  334. * @GSI_VEID_11: transfer completed for VEID 11
  335. * @GSI_VEID_12: transfer completed for VEID 12
  336. * @GSI_VEID_13: transfer completed for VEID 13
  337. * @GSI_VEID_14: transfer completed for VEID 14
  338. * @GSI_VEID_15: transfer completed for VEID 15
  339. * @GSI_VEID_DEFAULT: used when veid is invalid
  340. */
  341. enum gsi_chan_xfer_veid {
  342. GSI_VEID_0 = 0,
  343. GSI_VEID_1 = 1,
  344. GSI_VEID_2 = 2,
  345. GSI_VEID_3 = 3,
  346. GSI_VEID_4 = 4,
  347. GSI_VEID_5 = 5,
  348. GSI_VEID_6 = 6,
  349. GSI_VEID_7 = 7,
  350. GSI_VEID_8 = 8,
  351. GSI_VEID_9 = 9,
  352. GSI_VEID_10 = 10,
  353. GSI_VEID_11 = 11,
  354. GSI_VEID_12 = 12,
  355. GSI_VEID_13 = 13,
  356. GSI_VEID_14 = 14,
  357. GSI_VEID_15 = 15,
  358. GSI_VEID_DEFAULT,
  359. GSI_VEID_MAX
  360. };
  361. /**
  362. * gsi_chan_xfer_notify - Channel callback info
  363. *
  364. * @chan_user_data: cookie supplied in gsi_alloc_channel
  365. * @xfer_user_data: cookie of the gsi_xfer_elem that caused the
  366. * event to be generated
  367. * @evt_id: type of event triggered by the associated TRE
  368. * (corresponding to xfer_user_data)
  369. * @bytes_xfered: number of bytes transferred by the associated TRE
  370. * (corresponding to xfer_user_data)
  371. * @veid: virtual endpoint id. Valid for GCI completions only
  372. *
  373. */
  374. struct gsi_chan_xfer_notify {
  375. void *chan_user_data;
  376. void *xfer_user_data;
  377. enum gsi_chan_evt evt_id;
  378. uint16_t bytes_xfered;
  379. uint8_t veid;
  380. };
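/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation): a transfer-completion callback matching the xfer_cb prototype
 * in gsi_chan_props below. struct my_pkt and my_pkt_complete() are
 * hypothetical names used only for this example.
 */
#if 0
static void example_xfer_cb(struct gsi_chan_xfer_notify *notify)
{
	struct my_pkt *pkt = notify->xfer_user_data;	/* cookie of the completed TRE */

	switch (notify->evt_id) {
	case GSI_CHAN_EVT_EOT:
	case GSI_CHAN_EVT_EOB:
		/* notify->bytes_xfered bytes were transferred for this TRE */
		my_pkt_complete(pkt, notify->bytes_xfered);
		break;
	default:
		GSIERR("unexpected completion event %d\n", notify->evt_id);
		break;
	}
}
#endif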
  381. enum gsi_chan_err {
  382. GSI_CHAN_INVALID_TRE_ERR = 0x0,
  383. GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1,
  384. GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2,
  385. GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3,
  386. GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
  387. GSI_CHAN_HWO_1_ERR = 0x5
  388. };
  389. /**
  390. * gsi_chan_err_notify - Channel general callback info
  391. *
  392. * @chan_user_data: cookie supplied in gsi_alloc_channel
  393. * @evt_id: type of error
  394. * @err_desc: more info about the error
  395. *
  396. */
  397. struct gsi_chan_err_notify {
  398. void *chan_user_data;
  399. enum gsi_chan_err evt_id;
  400. uint16_t err_desc;
  401. };
  402. enum gsi_chan_ring_elem_size {
  403. GSI_CHAN_RE_SIZE_4B = 4,
  404. GSI_CHAN_RE_SIZE_8B = 8,
  405. GSI_CHAN_RE_SIZE_16B = 16,
  406. GSI_CHAN_RE_SIZE_32B = 32,
  407. GSI_CHAN_RE_SIZE_64B = 64,
  408. };
  409. enum gsi_chan_use_db_eng {
  410. GSI_CHAN_DIRECT_MODE = 0x0,
  411. GSI_CHAN_DB_MODE = 0x1,
  412. };
  413. /**
  414. * gsi_chan_props - Channel related properties
  415. *
  416. * @prot: interface type
  417. * @dir: channel direction
  418. * @ch_id: virtual channel ID
  419. * @evt_ring_hdl: handle of associated event ring. set to ~0 if no
  420. * event ring associated
  421. * @re_size: size of channel ring element
  422. * @ring_len: length of ring in bytes (must be integral multiple of
  423. * re_size)
  424. * @max_re_expected: maximal number of ring elements expected to be queued.
  425. * Used for data path statistics gathering. If 0 is provided,
  426. * ring_len / re_size will be used.
  427. * @ring_base_addr: physical base address of ring. Address must be aligned to
  428. * ring_len rounded to power of two
  429. * @ring_base_vaddr: virtual base address of ring (set to NULL when not
  430. * applicable)
  431. * @use_db_eng: 0 => direct mode (doorbells are written directly to RE
  432. * engine)
  433. * 1 => DB mode (doorbells are written to DB engine)
  434. * @max_prefetch: limit number of pre-fetch segments for channel
  435. * @low_weight: low channel weight (priority of channel for RE engine
  436. * round robin algorithm); must be >= 1
  437. * @empty_lvl_threshold:
  438. * The threshold number of free entries available in the
  439. * receiving fifos of GSI-peripheral. If Smart PF mode
  440. * is used, REE will fetch/send new TRE to peripheral only
  441. * if peripheral's empty_level_count is higher than
  442. * EMPTY_LVL_THRSHOLD defined for this channel
  443. * @tx_poll: channel processes completions in NAPI context
  444. * @xfer_cb: transfer notification callback, this callback happens
  445. * on event boundaries
  446. *
  447. * e.g. 1
  448. *
  449. * out TD with 3 REs
  450. *
  451. * RE1: EOT=0, EOB=0, CHAIN=1;
  452. * RE2: EOT=0, EOB=0, CHAIN=1;
  453. * RE3: EOT=1, EOB=0, CHAIN=0;
  454. *
  455. * the callback will be triggered for RE3 using the
  456. * xfer_user_data of that RE
  457. *
  458. * e.g. 2
  459. *
  460. * in REs
  461. *
  462. * RE1: EOT=1, EOB=0, CHAIN=0;
  463. * RE2: EOT=1, EOB=0, CHAIN=0;
  464. * RE3: EOT=1, EOB=0, CHAIN=0;
  465. *
  466. * received packet consumes all of RE1, RE2 and part of RE3
  467. * for EOT condition. there will be three callbacks in below
  468. * order
  469. *
  470. * callback for RE1 using GSI_CHAN_EVT_OVERFLOW
  471. * callback for RE2 using GSI_CHAN_EVT_OVERFLOW
  472. * callback for RE3 using GSI_CHAN_EVT_EOT
  473. *
  474. * @err_cb: error notification callback
  475. * @cleanup_cb: cleanup rx-pkt/skb callback
  476. * @chan_user_data: cookie used for notifications
  477. *
  478. * All the callbacks are in interrupt context
  479. *
  480. */
  481. struct gsi_chan_props {
  482. enum gsi_chan_prot prot;
  483. enum gsi_chan_dir dir;
  484. uint8_t ch_id;
  485. unsigned long evt_ring_hdl;
  486. enum gsi_chan_ring_elem_size re_size;
  487. uint32_t ring_len;
  488. uint16_t max_re_expected;
  489. uint64_t ring_base_addr;
  490. uint8_t db_in_bytes;
  491. uint8_t low_latency_en;
  492. void *ring_base_vaddr;
  493. enum gsi_chan_use_db_eng use_db_eng;
  494. enum gsi_max_prefetch max_prefetch;
  495. uint8_t low_weight;
  496. enum gsi_prefetch_mode prefetch_mode;
  497. uint8_t empty_lvl_threshold;
  498. bool tx_poll;
  499. void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
  500. void (*err_cb)(struct gsi_chan_err_notify *notify);
  501. void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data);
  502. void *chan_user_data;
  503. };
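/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation): filling gsi_chan_props for a GPI channel driven in direct
 * (non-DB-engine) mode. The channel ID, ring size and the example_* callbacks
 * are assumptions; GSI_CHAN_DIR_TO_GSI is assumed from the gsi_chan_dir enum,
 * which is not part of this excerpt.
 */
#if 0
static void example_fill_chan_props(struct gsi_chan_props *p,
				    unsigned long evt_ring_hdl,
				    uint64_t ring_pa, void *ring_va, void *priv)
{
	memset(p, 0, sizeof(*p));
	p->prot = GSI_CHAN_PROT_GPI;
	p->dir = GSI_CHAN_DIR_TO_GSI;			/* assumed TX direction value */
	p->ch_id = 5;					/* assumed virtual channel ID */
	p->evt_ring_hdl = evt_ring_hdl;			/* ~0 if no event ring */
	p->re_size = GSI_CHAN_RE_SIZE_16B;
	p->ring_len = 512 * GSI_CHAN_RE_SIZE_16B;	/* integral multiple of re_size */
	p->ring_base_addr = ring_pa;
	p->ring_base_vaddr = ring_va;
	p->use_db_eng = GSI_CHAN_DIRECT_MODE;		/* doorbells go straight to the RE engine */
	p->max_prefetch = GSI_ONE_PREFETCH_SEG;
	p->low_weight = 1;				/* must be >= 1 */
	p->xfer_cb = example_xfer_cb;			/* see the sketch above */
	p->err_cb = example_chan_err_cb;		/* hypothetical error handler */
	p->chan_user_data = priv;
}
#endif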
  504. enum gsi_xfer_flag {
  505. GSI_XFER_FLAG_CHAIN = 0x1,
  506. GSI_XFER_FLAG_EOB = 0x100,
  507. GSI_XFER_FLAG_EOT = 0x200,
  508. GSI_XFER_FLAG_BEI = 0x400
  509. };
  510. enum gsi_xfer_elem_type {
  511. GSI_XFER_ELEM_DATA,
  512. GSI_XFER_ELEM_IMME_CMD,
  513. GSI_XFER_ELEM_NOP,
  514. };
  515. /**
  516. * gsi_gpi_channel_scratch - GPI protocol SW config area of
  517. * channel scratch
  518. *
  519. * @dl_nlo_channel: Whether this is a DL NLO channel. Relevant for
  520. * GSI 2.5 and above, where DL NLO was introduced.
  521. * @max_outstanding_tre: Used for the prefetch management sequence by the
  522. * sequencer. Defines the maximum number of allowed
  523. * outstanding TREs in IPA/GSI (in Bytes). RE engine
  524. * prefetch will be limited by this configuration. It
  525. * is suggested to configure this value to IPA_IF
  526. * channel TLV queue size times element size. To disable
  527. * the feature in doorbell mode (DB Mode = 1), maximum
  528. * outstanding TREs should be set to 64KB
  529. * (or any value larger than or equal to the ring length, RLEN).
  530. * The field is irrelevant starting with GSI 2.5, where smart
  531. * prefetch is implemented by the H/W.
  532. * @outstanding_threshold: Used for the prefetch management sequence by the
  533. * sequencer. Defines the threshold (in Bytes) as to when
  534. * to update the channel doorbell. Should be smaller than
  535. * the maximum outstanding TREs value. It is suggested to
  536. * configure this value to 2 * element size.
  537. * The field is irrelevant starting with GSI 2.5, where smart
  538. * prefetch is implemented by the H/W.
  539. */
  540. struct __packed gsi_gpi_channel_scratch {
  541. uint64_t dl_nlo_channel:1; /* Relevant starting GSI 2.5 */
  542. uint64_t resvd1:63;
  543. uint32_t resvd2:16;
  544. uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */
  545. uint32_t resvd3:16;
  546. uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */
  547. };
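/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation) of the pre-GSI-2.5 prefetch tuning described above:
 * max_outstanding_tre = TLV queue depth * element size, and
 * outstanding_threshold = 2 * element size. The TLV depth is an assumed value.
 */
#if 0
static void example_fill_gpi_scratch(struct gsi_gpi_channel_scratch *gpi)
{
	const uint32_t tlv_queue_depth = 16;		/* assumed IPA_IF TLV queue size */
	const uint32_t elem_size = GSI_CHAN_RE_SIZE_16B;

	gpi->max_outstanding_tre = tlv_queue_depth * elem_size;	/* pre-GSI-2.5 only */
	gpi->outstanding_threshold = 2 * elem_size;		/* pre-GSI-2.5 only */
}
#endif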
  548. /**
  549. * gsi_mhi_channel_scratch - MHI protocol SW config area of
  550. * channel scratch
  551. *
  552. * @mhi_host_wp_addr: Valid only when UL/DL Sync En is asserted. Defines
  553. * address in host from which channel write pointer
  554. * should be read in polling mode
  555. * @assert_bit40: 1: bit #41 in address should be asserted upon
  556. * IPA_IF.ProcessDescriptor routine (for MHI over PCIe
  557. * transfers)
  558. * 0: bit #41 in address should be deasserted upon
  559. * IPA_IF.ProcessDescriptor routine (for non-MHI over
  560. * PCIe transfers)
  561. * @polling_configuration: Uplink channels: Defines timer to poll on MHI
  562. * context. Range: 1 to 31 milliseconds.
  563. * Downlink channel: Defines transfer ring buffer
  564. * availability threshold to poll on MHI context in
  565. * multiple of 8. Range: 0 to 31, meaning 0 to 248 ring
  566. * elements. E.g., value of 2 indicates 16 ring elements.
  567. * Valid only when Burst Mode Enabled is set to 1
  568. * @burst_mode_enabled: 0: Burst mode is disabled for this channel
  569. * 1: Burst mode is enabled for this channel
  570. * @polling_mode: 0: the channel is not in polling mode, meaning the
  571. * host should ring DBs.
  572. * 1: the channel is in polling mode, meaning the host
  573. * should not ring DBs until notified of DB mode/OOB mode
  574. * @oob_mod_threshold: Defines OOB moderation threshold. Units are in 8
  575. * ring elements.
  576. * @max_outstanding_tre: Used for the prefetch management sequence by the
  577. * sequencer. Defines the maximum number of allowed
  578. * outstanding TREs in IPA/GSI (in Bytes). RE engine
  579. * prefetch will be limited by this configuration. It
  580. * is suggested to configure this value to IPA_IF
  581. * channel TLV queue size times element size.
  582. * To disable the feature in doorbell mode (DB Mode = 1),
  583. * maximum outstanding TREs should be set to 64KB
  584. * (or any value larger than or equal to the ring length, RLEN).
  585. * The field is irrelevant starting with GSI 2.5, where smart
  586. * prefetch is implemented by the H/W.
  587. * @outstanding_threshold: Used for the prefetch management sequence by the
  588. * sequencer. Defines the threshold (in Bytes) as to when
  589. * to update the channel doorbell. Should be smaller than
  590. * the maximum outstanding TREs value. It is suggested to
  591. * configure this value to min(TLV_FIFO_SIZE/2, 8) *
  592. * element size.
  593. * The field is irrelevant starting with GSI 2.5, where smart
  594. * prefetch is implemented by the H/W.
  595. */
  596. struct __packed gsi_mhi_channel_scratch {
  597. uint64_t mhi_host_wp_addr;
  598. uint32_t rsvd1:1;
  599. uint32_t assert_bit40:1;
  600. uint32_t polling_configuration:5;
  601. uint32_t burst_mode_enabled:1;
  602. uint32_t polling_mode:1;
  603. uint32_t oob_mod_threshold:5;
  604. uint32_t resvd2:2;
  605. uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */
  606. uint32_t resvd3:16;
  607. uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */
  608. };
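/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation): encoding the downlink polling_configuration value described
 * above. The field is a ring-element threshold in multiples of 8 (e.g. an
 * encoded value of 2 means 16 elements), clamped to the 5-bit range 0-31.
 */
#if 0
static uint32_t example_mhi_dl_polling_cfg(uint32_t threshold_elems)
{
	uint32_t val = threshold_elems / 8;	/* units of 8 ring elements */

	return val > 31 ? 31 : val;		/* polling_configuration is 5 bits wide */
}
#endif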
  609. /**
  610. * gsi_mhi_channel_scratch_v2 - MHI protocol SW config area of
  611. * channel scratch
  612. *
  613. * @mhi_host_wp_addr_lo: Valid only when UL/DL Sync En is asserted. Defines
  614. * address in host from which channel write pointer
  615. * should be read in polling mode
  616. * @mhi_host_wp_addr_hi: Valid only when UL/DL Sync En is asserted. Defines
  617. * address in host from which channel write pointer
  618. * should be read in polling mode
  619. * @assert_bit40: 1: bit #41 in address should be asserted upon
  620. * IPA_IF.ProcessDescriptor routine (for MHI over PCIe
  621. * transfers)
  622. * 0: bit #41 in address should be deasserted upon
  623. * IPA_IF.ProcessDescriptor routine (for non-MHI over
  624. * PCIe transfers)
  625. * @polling_configuration: Uplink channels: Defines timer to poll on MHI
  626. * context. Range: 1 to 31 milliseconds.
  627. * Downlink channel: Defines transfer ring buffer
  628. * availability threshold to poll on MHI context in
  629. * multiple of 8. Range: 0 to 31, meaning 0 to 248 ring
  630. * elements. E.g., value of 2 indicates 16 ring elements.
  631. * Valid only when Burst Mode Enabled is set to 1
  632. * @burst_mode_enabled: 0: Burst mode is disabled for this channel
  633. * 1: Burst mode is enabled for this channel
  634. * @polling_mode: 0: the channel is not in polling mode, meaning the
  635. * host should ring DBs.
  636. * 1: the channel is in polling mode, meaning the host
  637. * should not ring DBs until notified of DB mode/OOB mode
  638. * @oob_mod_threshold: Defines OOB moderation threshold. Units are in 8
  639. * ring elements.
  640. */
  641. struct __packed gsi_mhi_channel_scratch_v2 {
  642. uint32_t mhi_host_wp_addr_lo;
  643. uint32_t mhi_host_wp_addr_hi : 9;
  644. uint32_t polling_configuration : 5;
  645. uint32_t rsvd1 : 18;
  646. uint32_t rsvd2 : 1;
  647. uint32_t assert_bit40 : 1;
  648. uint32_t resvd3 : 5;
  649. uint32_t burst_mode_enabled : 1;
  650. uint32_t polling_mode : 1;
  651. uint32_t oob_mod_threshold : 5;
  652. uint32_t resvd4 : 18; /* Not configured by AP */
  653. uint32_t resvd5; /* Not configured by AP */
  654. };
  655. /**
  656. * gsi_xdci_channel_scratch - xDCI protocol SW config area of
  657. * channel scratch
  658. *
  659. * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
  660. * configuration). Must be aligned to Max USB Packet Size
  661. * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned
  662. * transfer resource index for the transfer, which was
  663. * returned in response to the Start Transfer command.
  664. * This field is used for "Update Transfer" command
  665. * @last_trb_addr: Address (LSB - based on alignment restrictions) of
  666. * last TRB in queue. Used to identify rollover case
  667. * @depcmd_low_addr: Used to generate "Update Transfer" command
  668. * @max_outstanding_tre: Used for the prefetch management sequence by the
  669. * sequencer. Defines the maximum number of allowed
  670. * outstanding TREs in IPA/GSI (in Bytes). RE engine
  671. * prefetch will be limited by this configuration. It
  672. * is suggested to configure this value to IPA_IF
  673. * channel TLV queue size times element size.
  674. * To disable the feature in doorbell mode (DB Mode = 1),
  675. * maximum outstanding TREs should be set to 64KB
  676. * (or any value larger than or equal to the ring length, RLEN).
  677. * The field is irrelevant starting with GSI 2.5, where smart
  678. * prefetch is implemented by the H/W.
  679. * @depcmd_hi_addr: Used to generate "Update Transfer" command
  680. * @outstanding_threshold: Used for the prefetch management sequence by the
  681. * sequencer. Defines the threshold (in Bytes) as to when
  682. * to update the channel doorbell. Should be smaller than
  683. * the maximum outstanding TREs value. It is suggested to
  684. * configure this value to 2 * element size. For MBIM, the
  685. * suggested configuration is the element size.
  686. * The field is irrelevant starting with GSI 2.5, where smart
  687. * prefetch is implemented by the H/W.
  688. */
  689. struct __packed gsi_xdci_channel_scratch {
  690. uint32_t last_trb_addr:16;
  691. uint32_t resvd1:4;
  692. uint32_t xferrscidx:7;
  693. uint32_t const_buffer_size:5;
  694. uint32_t depcmd_low_addr;
  695. uint32_t depcmd_hi_addr:8;
  696. uint32_t resvd2:8;
  697. uint32_t max_outstanding_tre:16; /* Not relevant starting GSI 2.5 */
  698. uint32_t resvd3:16;
  699. uint32_t outstanding_threshold:16; /* Not relevant starting GSI 2.5 */
  700. };
  701. /**
  702. * gsi_wdi_channel_scratch - WDI protocol SW config area of
  703. * channel scratch
  704. *
  705. * @wifi_rx_ri_addr_low: Low 32 bits of Transfer ring Read Index address.
  706. * @wifi_rx_ri_addr_high: High 32 bits of Transfer ring Read Index address.
  707. * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index
  708. * N is the number of packets that IPA will
  709. * process before Wifi transfer ring Ri will
  710. * be updated.
  711. * @update_ri_moderation_counter: This field is incremented with each TRE
  712. * processed in MCS.
  713. * @wdi_rx_tre_proc_in_progress: It is set if IPA IF returned BECAME FULL
  714. * status after MCS submitted an inline immediate
  715. * command to update the metadata. It allows MCS
  716. * to know that it has to retry sending the TRE
  717. * to IPA.
  718. * @wdi_rx_vdev_id: Rx only. Initialized to 0xFF by SW after allocating channel
  719. * and before starting it. Both FW_DESC and VDEV_ID are part
  720. * of a scratch word that is Read/Write for both MCS and SW.
  721. * To avoid race conditions, SW should not update this field
  722. * after starting the channel.
  723. * @wdi_rx_fw_desc: Rx only. Initialized to 0xFF by SW after allocating channel
  724. * and before starting it. After Start, this is a Read only
  725. * field for SW.
  726. * @endp_metadatareg_offset: Rx only, the offset of IPA_ENDP_INIT_HDR_METADATA
  727. * of the corresponding endpoint in 4B words from IPA
  728. * base address. Read only field for MCS.
  729. * Write for SW.
  730. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field
  731. * for MCS. Write for SW.
  732. * @wdi_rx_pkt_length: If WDI_RX_TRE_PROC_IN_PROGRESS is set, this field is
  733. * valid and contains the packet length of the TRE that
  734. * needs to be submitted to IPA.
  735. * @resv1: reserved bits.
  736. * @pkt_comp_count: It is incremented on each AOS received. When event ring
  737. * Write index is updated, it is decremented by the same
  738. * amount.
  739. * @stop_in_progress_stm: If a Stop request is in progress, this will indicate
  740. * the current stage of processing of the stop within MCS
  741. * @resv2: reserved bits.
  742. * @wdi_rx_qmap_id_internal: Initialized to 0 by MCS when the channel is
  743. * allocated. It is updated to the current value of SW
  744. * QMAP ID that is being written by MCS to the IPA
  745. * metadata register.
  746. */
  747. struct __packed gsi_wdi_channel_scratch {
  748. uint32_t wifi_rx_ri_addr_low;
  749. uint32_t wifi_rx_ri_addr_high;
  750. uint32_t update_ri_moderation_threshold:5;
  751. uint32_t update_ri_moderation_counter:6;
  752. uint32_t wdi_rx_tre_proc_in_progress:1;
  753. uint32_t resv1:4;
  754. uint32_t wdi_rx_vdev_id:8;
  755. uint32_t wdi_rx_fw_desc:8;
  756. uint32_t endp_metadatareg_offset:16;
  757. uint32_t qmap_id:16;
  758. uint32_t wdi_rx_pkt_length:16;
  759. uint32_t resv2:2;
  760. uint32_t pkt_comp_count:11;
  761. uint32_t stop_in_progress_stm:3;
  762. uint32_t resv3:16;
  763. uint32_t wdi_rx_qmap_id_internal:16;
  764. };
  765. /**
  766. * gsi_wdi2_channel_scratch_new - WDI protocol SW config area of
  767. * channel scratch
  768. *
  769. * @wifi_rx_ri_addr_low: Low 32 bits of Transfer ring Read Index address.
  770. * @wifi_rx_ri_addr_high: High 32 bits of Transfer ring Read Index address.
  771. * @update_ri_moderation_threshold: Threshold N for Transfer ring Read Index
  772. * N is the number of packets that IPA will
  773. * process before Wifi transfer ring Ri will
  774. * be updated.
  775. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field
  776. * for MCS. Write for SW.
  777. * @endp_metadatareg_offset: Rx only, the offset of IPA_ENDP_INIT_HDR_METADATA
  778. * of the corresponding endpoint in 4B words from IPA
  779. * base address. Read only field for MCS.
  780. * Write for SW.
  781. * @wdi_rx_vdev_id: Rx only. Initialized to 0xFF by SW after allocating channel
  782. * and before starting it. Both FW_DESC and VDEV_ID are part
  783. * of a scratch word that is Read/Write for both MCS and SW.
  784. * To avoid race conditions, SW should not update this field
  785. * after starting the channel.
  786. * @wdi_rx_fw_desc: Rx only. Initialized to 0xFF by SW after allocating channel
  787. * and before starting it. After Start, this is a Read only
  788. * field for SW.
  789. * @update_ri_moderation_counter: This field is incremented with each TRE
  790. * processed in MCS.
  791. * @wdi_rx_tre_proc_in_progress: It is set if IPA IF returned BECAME FULL
  792. * status after MCS submitted an inline immediate
  793. * command to update the metadata. It allows MCS
  794. * to know that it has to retry sending the TRE
  795. * to IPA.
  796. * @outstanding_tlvs_counter: It is the count of outstanding TLVs submitted to
  797. * IPA by MCS and waiting for AOS completion from IPA.
  798. * @wdi_rx_pkt_length: If WDI_RX_TRE_PROC_IN_PROGRESS is set, this field is
  799. * valid and contains the packet length of the TRE that
  800. * needs to be submitted to IPA.
  801. * @resv1: reserved bits.
  802. * @pkt_comp_count: It is incremented on each AOS received. When event ring
  803. * Write index is updated, it is decremented by the same
  804. * amount.
  805. * @stop_in_progress_stm: If a Stop request is in progress, this will indicate
  806. * the current stage of processing of the stop within MCS
  807. * @resv2: reserved bits.
  808. * @wdi_rx_qmap_id_internal: Initialized to 0 by MCS when the channel is
  809. * allocated. It is updated to the current value of SW
  810. * QMAP ID that is being written by MCS to the IPA
  811. * metadata register.
  812. */
  813. struct __packed gsi_wdi2_channel_scratch_new {
  814. uint32_t wifi_rx_ri_addr_low;
  815. uint32_t wifi_rx_ri_addr_high;
  816. uint32_t update_ri_moderation_threshold:5;
  817. uint32_t qmap_id:8;
  818. uint32_t resv1:3;
  819. uint32_t endp_metadatareg_offset:16;
  820. uint32_t wdi_rx_vdev_id:8;
  821. uint32_t wdi_rx_fw_desc:8;
  822. uint32_t update_ri_moderation_counter:6;
  823. uint32_t wdi_rx_tre_proc_in_progress:1;
  824. uint32_t resv4:1;
  825. uint32_t outstanding_tlvs_counter:8;
  826. uint32_t wdi_rx_pkt_length:16;
  827. uint32_t resv2:2;
  828. uint32_t pkt_comp_count:11;
  829. uint32_t stop_in_progress_stm:3;
  830. uint32_t resv3:16;
  831. uint32_t wdi_rx_qmap_id_internal:16;
  832. };
  833. /**
  834. * gsi_mhip_channel_scratch - MHI PRIME protocol SW config area of
  835. * channel scratch
  836. * @assert_bit_40: Valid only for non-host channels.
  837. * Set to 1 for MHI Prime channels when running over PCIe.
  838. * @host_channel: Set to 1 for MHIP channel running on host.
  839. *
  840. */
  841. struct __packed gsi_mhip_channel_scratch {
  842. uint32_t assert_bit_40:1;
  843. uint32_t host_channel:1;
  844. uint32_t resvd1:30;
  845. };
  846. /**
  847. * gsi_11ad_rx_channel_scratch - 11AD protocol SW config area of
  848. * RX channel scratch
  849. *
  850. * @status_ring_hwtail_address_lsb: Low 32 bits of status ring hwtail address.
  851. * @status_ring_hwtail_address_msb: High 32 bits of status ring hwtail address.
  852. * @data_buffers_base_address_lsb: Low 32 bits of the data buffers address.
  853. * @data_buffers_base_address_msb: High 32 bits of the data buffers address.
  854. * @fixed_data_buffer_size: the fixed buffer size (> MTU).
  855. * @resv1: reserved bits.
  856. */
  857. struct __packed gsi_11ad_rx_channel_scratch {
  858. uint32_t status_ring_hwtail_address_lsb;
  859. uint32_t status_ring_hwtail_address_msb;
  860. uint32_t data_buffers_base_address_lsb;
  861. uint32_t data_buffers_base_address_msb:8;
  862. uint32_t fixed_data_buffer_size_pow_2:16;
  863. uint32_t resv1:8;
  864. };
  865. /**
  866. * gsi_11ad_tx_channel_scratch - 11AD protocol SW config area of
  867. * TX channel scratch
  868. *
  869. * @status_ring_hwtail_address_lsb: Low 32 bits of status ring hwtail address.
  870. * @status_ring_hwhead_address_lsb: Low 32 bits of status ring hwhead address.
  871. * @status_ring_hwhead_hwtail_8_msb: higher 8 msbs of status ring
  872. * hwhead/hwtail addresses (should be identical).
  873. * @update_status_hwtail_mod_threshold: The threshold in (32B) elements for
  874. * updating descriptor ring 11ad HWTAIL pointer moderation.
  875. * @status_ring_num_elem: the number of elements in the status ring.
  876. * @resv1: reserved bits.
  877. * @fixed_data_buffer_size_pow_2: the fixed buffer size power of 2 (> MTU).
  878. * @resv2: reserved bits.
  879. */
  880. struct __packed gsi_11ad_tx_channel_scratch {
  881. uint32_t status_ring_hwtail_address_lsb;
  882. uint32_t status_ring_hwhead_address_lsb;
  883. uint32_t status_ring_hwhead_hwtail_8_msb:8;
  884. uint32_t update_status_hwtail_mod_threshold:8;
  885. uint32_t status_ring_num_elem:16;
  886. uint32_t resv1:8;
  887. uint32_t fixed_data_buffer_size_pow_2:16;
  888. uint32_t resv2:8;
  889. };
  890. /**
  891. * gsi_wdi3_channel_scratch - WDI protocol 3 SW config area of
  892. * channel scratch
  893. *
  894. * @wifi_rp_address_low: Low 32 bits of Transfer ring Read Index address.
  895. * @wifi_rp_address_high: High 32 bits of Transfer ring Read Index address.
  896. * @update_rp_moderation_threshold: Threshold N for Transfer ring Read Index
  897. * N is the number of packets that IPA will
  898. * process before Wifi transfer ring Ri will
  899. * be updated.
  900. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only field
  901. * for MCS. Write for SW.
  902. * @resv: reserved bits.
  903. * @endp_metadata_reg_offset: Rx only, the offset of
  904. * IPA_ENDP_INIT_HDR_METADATA_n of the
  905. * corresponding endpoint in 4B words from IPA
  906. * base address.
  907. * @rx_pkt_offset: Rx only, Since Rx header length is not fixed,
  908. * WLAN host will pass this information to IPA.
  909. * @resv: reserved bits.
  910. */
  911. struct __packed gsi_wdi3_channel_scratch {
  912. uint32_t wifi_rp_address_low;
  913. uint32_t wifi_rp_address_high;
  914. uint32_t update_rp_moderation_threshold : 5;
  915. uint32_t qmap_id : 8;
  916. uint32_t reserved1 : 3;
  917. uint32_t endp_metadata_reg_offset : 16;
  918. uint32_t rx_pkt_offset : 16;
  919. uint32_t reserved2 : 16;
  920. };
  921. /**
  922. * gsi_qdss_channel_scratch - QDSS SW config area of
  923. * channel scratch
  924. *
  925. * @bam_p_evt_dest_addr: equivalent to event_ring_doorbell_pa
  926. * physical address of the doorbell through which the IPA uC
  927. * will update the head pointer of the event ring.
  928. * QDSS should send BAM_P_EVNT_REG address in this var
  929. * Configured with the GSI Doorbell Address.
  930. * GSI sends Update RP by doing a write to this address
  931. * @data_fifo_base_addr: Base address of the data FIFO used by BAM
  932. * @data_fifo_size: Size of the data FIFO
  933. * @bam_p_evt_threshold: Threshold level of how many bytes consumed
  934. * @override_eot: if override EOT==1, it doesn't check the EOT bit in
  935. * the descriptor
  936. */
  937. struct __packed gsi_qdss_channel_scratch {
  938. uint32_t bam_p_evt_dest_addr;
  939. uint32_t data_fifo_base_addr;
  940. uint32_t data_fifo_size : 16;
  941. uint32_t bam_p_evt_threshold : 16;
  942. uint32_t reserved1 : 2;
  943. uint32_t override_eot : 1;
  944. uint32_t reserved2 : 29;
  945. };
  946. /**
  947. * gsi_wdi3_channel_scratch2 - WDI3 protocol SW config area of
  948. * channel scratch2
  949. *
  950. * @update_rp_moderation_threshold: Threshold N for Transfer ring Read Index
  951. * N is the number of packets that IPA will
  952. * process before Wifi transfer ring Ri will
  953. * be updated.
  954. * @qmap_id: Rx only, used for setting metadata register in IPA. Read only
  955. * field for MCS. Write for SW.
  956. * @resv: reserved bits.
  957. * @endp_metadata_reg_offset: Rx only, the offset of
  958. * IPA_ENDP_INIT_HDR_METADATA_n of the
  959. * corresponding endpoint in 4B words from IPA
  960. * base address.
  961. */
  962. struct __packed gsi_wdi3_channel_scratch2 {
  963. uint32_t update_rp_moderation_threshold : 5;
  964. uint32_t qmap_id : 8;
  965. uint32_t reserved1 : 3;
  966. uint32_t endp_metadata_reg_offset : 16;
  967. };
  968. /**
  969. * gsi_wdi3_channel_scratch2_reg - channel scratch2 SW config area
  970. *
  971. */
  972. union __packed gsi_wdi3_channel_scratch2_reg {
  973. struct __packed gsi_wdi3_channel_scratch2 wdi;
  974. struct __packed {
  975. uint32_t word1;
  976. } data;
  977. };
  978. /**
  979. * gsi_rtk_channel_scratch - Realtek SW config area of
  980. * channel scratch
  981. *
  982. * @rtk_bar_low: Realtek bar address LSB
  983. * @rtk_bar_high: Realtek bar address MSB
  984. * @queue_number: dma channel number in rtk
  985. * @fix_buff_size: buff size in KB
  986. * @rtk_buff_addr_high: buffer addr where TRE points to
  987. * @rtk_buff_addr_low: buffer addr where TRE points to
  988. * the descriptor
  989. */
  990. struct __packed gsi_rtk_channel_scratch {
  991. uint32_t rtk_bar_low;
  992. uint32_t rtk_bar_high : 9;
  993. uint32_t queue_number : 5;
  994. uint32_t fix_buff_size : 4;
  995. uint32_t reserved1 : 6;
  996. uint32_t rtk_buff_addr_high : 8;
  997. uint32_t rtk_buff_addr_low;
  998. uint32_t reserved2;
  999. };
  1000. /**
  1001. * gsi_aqc_channel_scratch - AQC SW config area of
  1002. * channel scratch
  1003. *
  1004. * @buff_addr_lsb: AQC buffer address LSB (RX)
  1005. * @buff_addr_msb: AQC buffer address MSB (RX)
  1006. * @fix_buff_size: buff size in log2
  1007. * @head_ptr_lsb: head pointer address LSB (RX)
  1008. * @head_ptr_msb: head pointer address MSB (RX)
  1009. */
  1010. struct __packed gsi_aqc_channel_scratch {
  1011. uint32_t buff_addr_lsb;
  1012. uint32_t buff_addr_msb : 8;
  1013. uint32_t reserved1 : 8;
  1014. unsigned fix_buff_size : 16;
  1015. uint32_t head_ptr_lsb;
  1016. uint32_t head_ptr_msb : 9;
  1017. uint32_t reserved2 : 23;
  1018. };
  1019. /**
  1020. * gsi_ntn_channel_scratch - NTN SW config area of
  1021. * channel scratch
  1022. *
  1023. * @buff_addr_lsb: NTN buffer address LSB
  1024. * @buff_addr_msb: NTN buffer address MSB
  1025. * @fix_buff_size: buff size in log2
  1026. * @ioc_mod_threshold: the threshold for IOC moderation (TX)
  1027. */
  1028. struct __packed gsi_ntn_channel_scratch {
  1029. uint32_t buff_addr_lsb;
  1030. uint32_t buff_addr_msb : 8;
  1031. uint32_t fix_buff_size : 4;
  1032. uint32_t reserved1 : 20;
  1033. uint32_t ioc_mod_threshold : 16;
  1034. uint32_t reserved2 : 16;
  1035. uint32_t reserved3;
  1036. uint32_t reserved4;
  1037. };
  1038. /**
  1039. * gsi_channel_scratch - channel scratch SW config area
  1040. *
  1041. */
  1042. union __packed gsi_channel_scratch {
  1043. struct __packed gsi_gpi_channel_scratch gpi;
  1044. struct __packed gsi_mhi_channel_scratch mhi;
  1045. struct __packed gsi_mhi_channel_scratch_v2 mhi_v2;
  1046. struct __packed gsi_xdci_channel_scratch xdci;
  1047. struct __packed gsi_wdi_channel_scratch wdi;
  1048. struct __packed gsi_11ad_rx_channel_scratch rx_11ad;
  1049. struct __packed gsi_11ad_tx_channel_scratch tx_11ad;
  1050. struct __packed gsi_wdi3_channel_scratch wdi3;
  1051. struct __packed gsi_mhip_channel_scratch mhip;
  1052. struct __packed gsi_wdi2_channel_scratch_new wdi2_new;
  1053. struct __packed gsi_aqc_channel_scratch aqc;
  1054. struct __packed gsi_rtk_channel_scratch rtk;
  1055. struct __packed gsi_ntn_channel_scratch ntn;
  1056. struct __packed gsi_qdss_channel_scratch qdss;
  1057. struct __packed {
  1058. uint32_t word1;
  1059. uint32_t word2;
  1060. uint32_t word3;
  1061. uint32_t word4;
  1062. } data;
  1063. };
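/*
 * Illustrative sketch (not part of the original header, excluded from
 * compilation): the union above lets the protocol-specific layouts and the
 * raw four-word view alias the same scratch area, e.g. when the words have to
 * be written to hardware one register at a time.
 */
#if 0
static void example_scratch_to_words(const union gsi_channel_scratch *scr,
				     uint32_t out[4])
{
	out[0] = scr->data.word1;
	out[1] = scr->data.word2;
	out[2] = scr->data.word3;
	out[3] = scr->data.word4;
}
#endif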
  1064. /**
  1065. * gsi_wdi_channel_scratch3 - WDI protocol SW config area of
  1066. * channel scratch3
  1067. */
  1068. struct __packed gsi_wdi_channel_scratch3 {
  1069. uint32_t endp_metadatareg_offset:16;
  1070. uint32_t qmap_id:16;
  1071. };
  1072. /**
  1073. * gsi_wdi_channel_scratch3_reg - channel scratch3 SW config area
  1074. *
  1075. */
  1076. union __packed gsi_wdi_channel_scratch3_reg {
  1077. struct __packed gsi_wdi_channel_scratch3 wdi;
  1078. struct __packed {
  1079. uint32_t word1;
  1080. } data;
  1081. };
  1082. /**
  1083. * gsi_wdi2_channel_scratch2 - WDI protocol SW config area of
  1084. * channel scratch2
  1085. */
  1086. struct __packed gsi_wdi2_channel_scratch2 {
  1087. uint32_t update_ri_moderation_threshold:5;
  1088. uint32_t qmap_id:8;
  1089. uint32_t resv1:3;
  1090. uint32_t endp_metadatareg_offset:16;
  1091. };
  1092. /**
  1093. * gsi_wdi_channel_scratch2_reg - channel scratch2 SW config area
  1094. *
  1095. */
  1096. union __packed gsi_wdi2_channel_scratch2_reg {
  1097. struct __packed gsi_wdi2_channel_scratch2 wdi;
  1098. struct __packed {
  1099. uint32_t word1;
  1100. } data;
  1101. };
  1102. /**
  1103. * gsi_mhi_evt_scratch - MHI protocol SW config area of
  1104. * event scratch
  1105. */
  1106. struct __packed gsi_mhi_evt_scratch {
  1107. uint32_t resvd1;
  1108. uint32_t resvd2;
  1109. };
  1110. /**
  1111. * gsi_mhip_evt_scratch - MHI PRIME protocol SW config area of
  1112. * event scratch
  1113. */
  1114. struct __packed gsi_mhip_evt_scratch {
  1115. uint32_t rp_mod_threshold:8;
  1116. uint32_t rp_mod_timer:4;
  1117. uint32_t rp_mod_counter:8;
  1118. uint32_t rp_mod_timer_id:4;
  1119. uint32_t rp_mod_timer_running:1;
  1120. uint32_t resvd1:7;
  1121. uint32_t fixed_buffer_sz:16;
  1122. uint32_t resvd2:16;
  1123. };
  1124. /**
  1125. * gsi_xdci_evt_scratch - xDCI protocol SW config area of
  1126. * event scratch
  1127. *
  1128. */
  1129. struct __packed gsi_xdci_evt_scratch {
  1130. uint32_t gevntcount_low_addr;
  1131. uint32_t gevntcount_hi_addr:8;
  1132. uint32_t resvd1:24;
  1133. };
  1134. /**
  1135. * gsi_wdi_evt_scratch - WDI protocol SW config area of
  1136. * event scratch
  1137. *
  1138. */
  1139. struct __packed gsi_wdi_evt_scratch {
  1140. uint32_t update_ri_moderation_config:8;
  1141. uint32_t resvd1:8;
  1142. uint32_t update_ri_mod_timer_running:1;
  1143. uint32_t evt_comp_count:14;
  1144. uint32_t resvd2:1;
  1145. uint32_t last_update_ri:16;
  1146. uint32_t resvd3:16;
  1147. };
  1148. /**
  1149. * gsi_11ad_evt_scratch - 11AD protocol SW config area of
  1150. * event scratch
  1151. *
  1152. */
  1153. struct __packed gsi_11ad_evt_scratch {
  1154. uint32_t update_status_hwtail_mod_threshold : 8;
  1155. uint32_t resvd1:8;
  1156. uint32_t resvd2:16;
  1157. uint32_t resvd3;
  1158. };
  1159. /**
  1160. * gsi_wdi3_evt_scratch - wdi3 protocol SW config area of
  1161. * event scratch
  1162. * @update_rp_moderation_config: Threshold N for Transfer ring Read Index
  1163. * N is the number of packets that IPA will
  1164. * process before Wifi transfer ring Ri will
  1165. * be updated.
  1166. * @reserved1: reserve bit.
  1167. * @reserved2: reserve bit.
  1168. */
  1169. struct __packed gsi_wdi3_evt_scratch {
  1170. uint32_t update_rp_moderation_config : 8;
  1171. uint32_t reserved1 : 24;
  1172. uint32_t reserved2;
  1173. };
  1174. /**
  1175. * gsi_rtk_evt_scratch - realtek protocol SW config area of
  1176. * event scratch
  1177. * @reserved1: reserve bit.
  1178. * @reserved2: reserve bit.
  1179. */
  1180. struct __packed gsi_rtk_evt_scratch {
  1181. uint32_t reserved1;
  1182. uint32_t reserved2;
  1183. };
  1184. /**
  1185. * gsi_aqc_evt_scratch - AQC protocol SW config area of
  1186. * event scratch
  1187. * @head_ptr_wrb_mod_threshold: head pointer write-back moderation threshold
  1188. * @reserved1-3: reserve bit.
  1189. */
  1190. struct __packed gsi_aqc_evt_scratch {
  1191. uint8_t head_ptr_wrb_mod_threshold;
  1192. uint8_t reserved1;
  1193. uint16_t reserved2;
  1194. uint32_t reserved3;
  1195. };
/**
 * gsi_evt_scratch - event scratch SW config area
 *
 */
union __packed gsi_evt_scratch {
        struct __packed gsi_mhi_evt_scratch mhi;
        struct __packed gsi_xdci_evt_scratch xdci;
        struct __packed gsi_wdi_evt_scratch wdi;
        struct __packed gsi_11ad_evt_scratch w11ad;
        struct __packed gsi_wdi3_evt_scratch wdi3;
        struct __packed gsi_mhip_evt_scratch mhip;
        struct __packed gsi_aqc_evt_scratch aqc;
        struct __packed gsi_rtk_evt_scratch rtk;
        struct __packed {
                uint32_t word1;
                uint32_t word2;
        } data;
};
/**
 * gsi_device_scratch - EE scratch config parameters
 *
 * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid?
 * @mhi_base_chan_idx: base index of IPA MHI channel indexes.
 *                     IPA MHI channel index = GSI channel ID +
 *                     MHI base channel index
 * @max_usb_pkt_size_valid: is max_usb_pkt_size valid?
 * @max_usb_pkt_size: max USB packet size in bytes (valid values are
 *                    64, 512 and 1024)
 */
struct gsi_device_scratch {
        bool mhi_base_chan_idx_valid;
        uint8_t mhi_base_chan_idx;
        bool max_usb_pkt_size_valid;
        uint16_t max_usb_pkt_size;
};
/**
 * gsi_chan_info - information about channel occupancy
 *
 * @wp: channel write pointer (physical address)
 * @rp: channel read pointer (physical address)
 * @evt_valid: is evt* info valid?
 * @evt_wp: event ring write pointer (physical address)
 * @evt_rp: event ring read pointer (physical address)
 */
struct gsi_chan_info {
        uint64_t wp;
        uint64_t rp;
        bool evt_valid;
        uint64_t evt_wp;
        uint64_t evt_rp;
};
enum gsi_evt_ring_state {
        GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
        GSI_EVT_RING_STATE_ALLOCATED = 0x1,
        GSI_EVT_RING_STATE_ERROR = 0xf
};
enum gsi_chan_state {
        GSI_CHAN_STATE_NOT_ALLOCATED = 0x0,
        GSI_CHAN_STATE_ALLOCATED = 0x1,
        GSI_CHAN_STATE_STARTED = 0x2,
        GSI_CHAN_STATE_STOPPED = 0x3,
        GSI_CHAN_STATE_STOP_IN_PROC = 0x4,
        GSI_CHAN_STATE_FLOW_CONTROL = 0x5,
        GSI_CHAN_STATE_ERROR = 0xf
};
struct gsi_ring_ctx {
        spinlock_t slock;
        unsigned long base_va;
        uint64_t base;
        uint64_t wp;
        uint64_t rp;
        uint64_t wp_local;
        uint64_t rp_local;
        uint32_t len;
        uint8_t elem_sz;
        uint16_t max_num_elem;
        uint64_t end;
};
struct gsi_chan_dp_stats {
        unsigned long ch_below_lo;
        unsigned long ch_below_hi;
        unsigned long ch_above_hi;
        unsigned long empty_time;
        unsigned long last_timestamp;
};
struct gsi_chan_stats {
        unsigned long queued;
        unsigned long completed;
        unsigned long callback_to_poll;
        unsigned long poll_to_callback;
        unsigned long poll_pending_irq;
        unsigned long invalid_tre_error;
        unsigned long poll_ok;
        unsigned long poll_empty;
        unsigned long userdata_in_use;
        struct gsi_chan_dp_stats dp;
};
/**
 * struct gsi_user_data - user_data element pointed to by the TRE
 * @valid: true when the element is in use and must be cleaned up;
 *         false when it is free to be overwritten
 * @p: pointer to the user data array element
 */
struct gsi_user_data {
        bool valid;
        void *p;
};
struct gsi_chan_ctx {
        struct gsi_chan_props props;
        enum gsi_chan_state state;
        struct gsi_ring_ctx ring;
        struct gsi_user_data *user_data;
        struct gsi_evt_ctx *evtr;
        struct mutex mlock;
        struct completion compl;
        bool allocated;
        atomic_t poll_mode;
        union __packed gsi_channel_scratch scratch;
        struct gsi_chan_stats stats;
        bool enable_dp_stats;
        bool print_dp_stats;
};
struct gsi_evt_stats {
        unsigned long completed;
};
struct gsi_evt_ctx {
        struct gsi_evt_ring_props props;
        enum gsi_evt_ring_state state;
        uint8_t id;
        struct gsi_ring_ctx ring;
        struct mutex mlock;
        struct completion compl;
        struct gsi_chan_ctx *chan[MAX_CHANNELS_SHARING_EVENT_RING];
        uint8_t num_of_chan_allocated;
        atomic_t chan_ref_cnt;
        union __packed gsi_evt_scratch scratch;
        struct gsi_evt_stats stats;
};
struct gsi_ee_scratch {
        union __packed {
                struct {
                        uint32_t inter_ee_cmd_return_code:3;
                        uint32_t resvd1:2;
                        uint32_t generic_ee_cmd_return_code:3;
                        uint32_t resvd2:2;
                        uint32_t generic_ee_cmd_return_val:3;
                        uint32_t resvd4:2;
                        uint32_t max_usb_pkt_size:1;
                        uint32_t resvd3:8;
                        uint32_t mhi_base_chan_idx:8;
                } s;
                uint32_t val;
        } word0;
        uint32_t word1;
};
struct ch_debug_stats {
        unsigned long ch_allocate;
        unsigned long ch_start;
        unsigned long ch_stop;
        unsigned long ch_reset;
        unsigned long ch_de_alloc;
        unsigned long ch_db_stop;
        unsigned long cmd_completed;
};
struct gsi_generic_ee_cmd_debug_stats {
        unsigned long halt_channel;
        unsigned long flow_ctrl_channel;
};
struct gsi_coal_chan_info {
        uint8_t ch_id;
        uint8_t evchid;
};
struct gsi_log_ts {
        u64 timestamp;
        u64 qtimer;
        u32 interrupt_type;
};
struct gsi_msi {
        u32 num;
        DECLARE_BITMAP(allocated, GSI_MAX_NUM_MSI);
        DECLARE_BITMAP(used, GSI_MAX_NUM_MSI);
        struct msi_msg msg[GSI_MAX_NUM_MSI];
        u32 irq[GSI_MAX_NUM_MSI];
        u32 evt[GSI_MAX_NUM_MSI];
        unsigned long mask;
};
struct gsi_ctx {
        void __iomem *base;
        struct device *dev;
        struct gsi_per_props per;
        bool per_registered;
        struct gsi_chan_ctx chan[GSI_CHAN_MAX];
        struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
        struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
        struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg;
        struct mutex mlock;
        spinlock_t slock;
        unsigned long evt_bmap;
        bool enabled;
        atomic_t num_chan;
        atomic_t num_evt_ring;
        struct gsi_ee_scratch scratch;
        int num_ch_dp_stats;
        struct workqueue_struct *dp_stat_wq;
        u32 max_ch;
        u32 max_ev;
        struct completion gen_ee_cmd_compl;
        void *ipc_logbuf;
        void *ipc_logbuf_low;
        struct gsi_coal_chan_info coal_info;
        bool msi_addr_set;
        uint64_t msi_addr;
        struct gsi_msi msi;
        /*
         * The following fields are used only on emulation systems.
         */
        void __iomem *intcntrlr_base;
        u32 intcntrlr_mem_size;
        irq_handler_t intcntrlr_gsi_isr;
        irq_handler_t intcntrlr_client_isr;
        struct gsi_log_ts gsi_isr_cache[GSI_ISR_CACHE_MAX];
        int gsi_isr_cache_index;
        atomic_t num_unclock_irq;
};
enum gsi_re_type {
        GSI_RE_XFER = 0x2,
        GSI_RE_IMMD_CMD = 0x3,
        GSI_RE_NOP = 0x4,
        GSI_RE_COAL = 0x8,
};
struct __packed gsi_tre {
        uint64_t buffer_ptr;
        uint16_t buf_len;
        uint16_t resvd1;
        uint16_t chain:1;
        uint16_t resvd4:7;
        uint16_t ieob:1;
        uint16_t ieot:1;
        uint16_t bei:1;
        uint16_t resvd3:5;
        uint8_t re_type;
        uint8_t resvd2;
};
struct __packed gsi_gci_tre {
        uint64_t buffer_ptr:41;
        uint64_t resvd1:7;
        uint64_t buf_len:16;
        uint64_t cookie:40;
        uint64_t resvd2:8;
        uint64_t re_type:8;
        uint64_t resvd3:8;
};
#define GSI_XFER_COMPL_TYPE_GCI 0x28
struct __packed gsi_xfer_compl_evt {
        union {
                uint64_t xfer_ptr;
                struct {
                        uint64_t cookie:40;
                        uint64_t resvd1:24;
                };
        };
        uint16_t len;
        uint8_t veid;
        uint8_t code; /* see gsi_chan_evt */
        uint16_t resvd;
        uint8_t type;
        uint8_t chid;
};
enum gsi_err_type {
        GSI_ERR_TYPE_GLOB = 0x1,
        GSI_ERR_TYPE_CHAN = 0x2,
        GSI_ERR_TYPE_EVT = 0x3,
};
enum gsi_err_code {
        GSI_INVALID_TRE_ERR = 0x1,
        GSI_OUT_OF_BUFFERS_ERR = 0x2,
        GSI_OUT_OF_RESOURCES_ERR = 0x3,
        GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
        GSI_EVT_RING_EMPTY_ERR = 0x5,
        GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
        GSI_HWO_1_ERR = 0x8
};
struct __packed gsi_log_err {
        uint32_t arg3:4;
        uint32_t arg2:4;
        uint32_t arg1:4;
        uint32_t code:4;
        uint32_t resvd:3;
        uint32_t virt_idx:5;
        uint32_t err_type:4;
        uint32_t ee:4;
};
enum gsi_ch_cmd_opcode {
        GSI_CH_ALLOCATE = 0x0,
        GSI_CH_START = 0x1,
        GSI_CH_STOP = 0x2,
        GSI_CH_RESET = 0x9,
        GSI_CH_DE_ALLOC = 0xa,
        GSI_CH_DB_STOP = 0xb,
};
enum gsi_evt_ch_cmd_opcode {
        GSI_EVT_ALLOCATE = 0x0,
        GSI_EVT_RESET = 0x9,
        GSI_EVT_DE_ALLOC = 0xa,
};
enum gsi_generic_ee_cmd_opcode {
        GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
        GSI_GEN_EE_CMD_ALLOC_CHANNEL = 0x2,
        GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL = 0x3,
        GSI_GEN_EE_CMD_DISABLE_FLOW_CHANNEL = 0x4,
        GSI_GEN_EE_CMD_QUERY_FLOW_CHANNEL = 0x5,
};
enum gsi_generic_ee_cmd_return_code {
        GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1,
        GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2,
        GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3,
        GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4,
        GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5,
        GSI_GEN_EE_CMD_RETURN_CODE_RETRY = 0x6,
        GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES = 0x7,
};
/**
 * struct gsi_hw_profiling_data - GSI profiling data
 * @bp_cnt: Back Pressure occurrence count
 * @bp_and_pending_cnt: Back Pressure with pending back pressure count
 * @mcs_busy_cnt: Cycle count for MCS busy
 * @mcs_idle_cnt: Cycle count for MCS idle
 */
struct gsi_hw_profiling_data {
        u64 bp_cnt;
        u64 bp_and_pending_cnt;
        u64 mcs_busy_cnt;
        u64 mcs_idle_cnt;
};
/**
 * struct gsi_fw_version - GSI fw version data
 * @hw: HW version
 * @flavor: Flavor identifier
 * @fw: FW version
 */
struct gsi_fw_version {
        u32 hw;
        u32 flavor;
        u32 fw;
};
enum gsi_generic_ee_cmd_query_retun_val {
        GSI_GEN_EE_CMD_RETURN_VAL_FLOW_CONTROL_PRIMARY = 0,
        GSI_GEN_EE_CMD_RETURN_VAL_FLOW_CONTROL_SECONDARY = 1,
        GSI_GEN_EE_CMD_RETURN_VAL_FLOW_CONTROL_PENDING = 2,
};
extern struct gsi_ctx *gsi_ctx;
/**
 * gsi_xfer_elem - Metadata about a single transfer
 *
 * @addr: physical address of buffer
 * @len: size of buffer for GSI_XFER_ELEM_DATA:
 *       for outbound transfers this is the number of bytes to
 *       transfer;
 *       for inbound transfers, this is the maximum number of
 *       bytes the host expects from the device in this transfer.
 *
 *       immediate command opcode for GSI_XFER_ELEM_IMME_CMD
 * @flags: transfer flags, OR of all the applicable flags
 *
 *        GSI_XFER_FLAG_BEI: Block event interrupt
 *        1: Event generated by this ring element must not assert
 *        an interrupt to the host
 *        0: Event generated by this ring element must assert an
 *        interrupt to the host
 *
 *        GSI_XFER_FLAG_EOT: Interrupt on end of transfer
 *        1: If an EOT condition is encountered when processing
 *        this ring element, an event is generated by the device
 *        with its completion code set to EOT.
 *        0: If an EOT condition is encountered for this ring
 *        element, a completion event is not generated by the
 *        device, unless IEOB is 1
 *
 *        GSI_XFER_FLAG_EOB: Interrupt on end of block
 *        1: Device notifies host after processing this ring element
 *        by sending a completion event
 *        0: Completion event is not required after processing this
 *        ring element
 *
 *        GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring
 *        elements in a TD
 *
 * @type: transfer type
 *
 *        GSI_XFER_ELEM_DATA: for all data transfers
 *        GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
 *        GSI_XFER_ELEM_NOP: for event generation only
 *
 * @xfer_user_data: cookie used in xfer_cb
 *
 */
struct gsi_xfer_elem {
        uint64_t addr;
        uint16_t len;
        uint16_t flags;
        enum gsi_xfer_elem_type type;
        void *xfer_user_data;
};
/**
 * gsi_alloc_evt_ring - Peripheral should call this function to
 * allocate an event ring
 *
 * @props: Event ring properties
 * @dev_hdl: Client handle previously obtained from
 *           gsi_register_device
 * @evt_ring_hdl: Handle populated by GSI, opaque to client
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
                unsigned long *evt_ring_hdl);
/**
 * gsi_dealloc_evt_ring - Peripheral should call this function to
 * de-allocate an event ring. There should not exist any active
 * channels using this event ring
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *                gsi_alloc_evt_ring
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl);
/**
 * gsi_alloc_channel - Peripheral should call this function to
 * allocate a channel
 *
 * @props: Channel properties
 * @dev_hdl: Client handle previously obtained from
 *           gsi_register_device
 * @chan_hdl: Handle populated by GSI, opaque to client
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
                unsigned long *chan_hdl);
/**
 * gsi_start_channel - Peripheral should call this function to
 * start a channel, i.e. put it into running state
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_start_channel(unsigned long chan_hdl);
/**
 * gsi_reset_channel - Peripheral should call this function to
 * reset a channel to recover from error state
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_reset_channel(unsigned long chan_hdl);
/**
 * gsi_dealloc_channel - Peripheral should call this function to
 * de-allocate a channel
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_dealloc_channel(unsigned long chan_hdl);
/**
 * gsi_poll_channel - Peripheral should call this function to query for
 * completed transfer descriptors.
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @notify: Information about the completed transfer if any
 *
 * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
 * completed)
 */
int gsi_poll_channel(unsigned long chan_hdl,
                struct gsi_chan_xfer_notify *notify);
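/*
 * Illustrative polling sketch (not part of the API): drain completions from a
 * channel until GSI reports the ring empty. Assumes "chan_hdl" was obtained
 * from gsi_alloc_channel and the channel is in polling mode; the success
 * return value is assumed to be GSI_STATUS_SUCCESS (0) as defined earlier in
 * this header, and client_process_completion() is a hypothetical client helper.
 *
 *      struct gsi_chan_xfer_notify notify;
 *      int ret;
 *
 *      do {
 *              ret = gsi_poll_channel(chan_hdl, &notify);
 *              if (ret == GSI_STATUS_SUCCESS)
 *                      client_process_completion(&notify);
 *      } while (ret == GSI_STATUS_SUCCESS);
 *      // ret is GSI_STATUS_POLL_EMPTY once no completed descriptors remain
 */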
/**
 * gsi_ring_evt_doorbell_polling_mode - Ring the event ring doorbell from
 * polling (NAPI) context
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 */
void gsi_ring_evt_doorbell_polling_mode(unsigned long chan_hdl);
/**
 * gsi_config_channel_mode - Peripheral should call this function
 * to configure the channel mode.
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @mode: Mode to move the channel into
 *
 * @Return gsi_status
 */
int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode);
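/*
 * Illustrative sketch (not part of the API): a NAPI-style switch between
 * interrupt (callback) and polling mode. GSI_CHAN_MODE_POLL and
 * GSI_CHAN_MODE_CALLBACK are assumed to be the gsi_chan_mode enumerators
 * defined earlier in this header.
 *
 *      // In the completion interrupt path: stop further interrupts and poll.
 *      gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_POLL);
 *
 *      // ... gsi_poll_channel()/gsi_poll_n_channel() until POLL_EMPTY ...
 *
 *      // When the ring is drained, re-arm completion interrupts.
 *      gsi_config_channel_mode(chan_hdl, GSI_CHAN_MODE_CALLBACK);
 */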
/**
 * gsi_queue_xfer - Peripheral should call this function
 * to queue transfers on the given channel
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @num_xfers: Number of transfers in the array @xfer
 * @xfer: Array of num_xfers transfer descriptors
 * @ring_db: If true, tell HW about these queued xfers
 *           If false, do not notify HW at this time
 *
 * @Return gsi_status
 */
int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
                struct gsi_xfer_elem *xfer, bool ring_db);
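/*
 * Illustrative sketch (not part of the API): build a single outbound data
 * transfer element and queue it, ringing the doorbell immediately. The DMA
 * address "buf_dma", length "buf_len" and cookie "priv" are placeholders the
 * client is assumed to own; error handling is omitted for brevity.
 *
 *      struct gsi_xfer_elem xfer = {
 *              .addr = buf_dma,                // physical address of the buffer
 *              .len = buf_len,                 // bytes to transfer
 *              .flags = GSI_XFER_FLAG_EOT,     // interrupt on end of transfer
 *              .type = GSI_XFER_ELEM_DATA,
 *              .xfer_user_data = priv,         // returned via the completion callback
 *      };
 *
 *      ret = gsi_queue_xfer(chan_hdl, 1, &xfer, true);
 *
 * Passing ring_db == false lets a client batch several gsi_queue_xfer calls
 * and notify HW once later with gsi_start_xfer.
 */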
void gsi_debugfs_init(void);
uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr);
void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used);
/**
 * gsi_register_device - Peripheral should call this function to
 * register itself with GSI before invoking any other APIs
 *
 * @props: Peripheral properties
 * @dev_hdl: Handle populated by GSI, opaque to client
 *
 * @Return -GSI_STATUS_AGAIN if request should be re-tried later
 *         other error codes for failure
 */
int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl);
/**
 * gsi_complete_clk_grant - Peripheral should call this function to
 * grant the clock resource requested by GSI previously that could not
 * be granted synchronously. GSI will release the clock resource using
 * the rel_clk_cb when appropriate
 *
 * @dev_hdl: Client handle previously obtained from
 *           gsi_register_device
 *
 * @Return gsi_status
 */
int gsi_complete_clk_grant(unsigned long dev_hdl);
/**
 * gsi_write_device_scratch - Peripheral should call this function to
 * write to the EE scratch area
 *
 * @dev_hdl: Client handle previously obtained from
 *           gsi_register_device
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_device_scratch(unsigned long dev_hdl,
                struct gsi_device_scratch *val);
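/*
 * Illustrative sketch (not part of the API): program the EE scratch for an
 * MHI client right after registration. The index value is a placeholder;
 * presumably only fields whose *_valid flag is set are applied.
 *
 *      struct gsi_device_scratch scr = {
 *              .mhi_base_chan_idx_valid = true,
 *              .mhi_base_chan_idx = 0,         // example base index
 *              .max_usb_pkt_size_valid = false,
 *      };
 *
 *      ret = gsi_write_device_scratch(dev_hdl, &scr);
 */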
/**
 * gsi_deregister_device - Peripheral should call this function to
 * de-register itself from GSI
 *
 * @dev_hdl: Client handle previously obtained from
 *           gsi_register_device
 * @force: When set to true, cleanup is performed even if there
 *         are in-use resources such as channels, event rings, etc.;
 *         this is typically used after a GSI reset to recover from
 *         a fatal error.
 *         When set to false, no allocated channels or event rings
 *         may remain.
 *
 * @Return gsi_status
 */
int gsi_deregister_device(unsigned long dev_hdl, bool force);
/**
 * gsi_write_evt_ring_scratch - Peripheral should call this function to
 * write to the scratch area of the event ring context
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *                gsi_alloc_evt_ring
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
                union __packed gsi_evt_scratch val);
/**
 * gsi_query_evt_ring_db_addr - Peripheral should call this function to
 * query the physical addresses of the event ring doorbell registers
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *                gsi_alloc_evt_ring
 * @db_addr_wp_lsb: Physical address of doorbell register where the 32
 *                  LSBs of the doorbell value should be written
 * @db_addr_wp_msb: Physical address of doorbell register where the 32
 *                  MSBs of the doorbell value should be written
 *
 * @Return gsi_status
 */
int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
                uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
/**
 * gsi_ring_evt_ring_db - Peripheral should call this function for
 * ringing the event ring doorbell with given value
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *                gsi_alloc_evt_ring
 * @value: The value to be used for ringing the doorbell
 *
 * @Return gsi_status
 */
int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value);
/**
 * gsi_ring_ch_ring_db - Peripheral should call this function for
 * ringing the channel ring doorbell with given value
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @value: The value to be used for ringing the doorbell
 *
 * @Return gsi_status
 */
int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value);
/**
 * gsi_reset_evt_ring - Peripheral should call this function to
 * reset an event ring to recover from error state
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *                gsi_alloc_evt_ring
 *
 * This function can sleep
 *
 * @Return gsi_status
 */
int gsi_reset_evt_ring(unsigned long evt_ring_hdl);
/**
 * gsi_get_evt_ring_cfg - This function returns the current config
 * of the specified event ring
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *                gsi_alloc_evt_ring
 * @props: where to copy properties to
 * @scr: where to copy scratch info to
 *
 * @Return gsi_status
 */
int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
                struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
/**
 * gsi_set_evt_ring_cfg - This function applies the supplied config
 * to the specified event ring.
 *
 * The exclusive property of the event ring cannot be changed after
 * gsi_alloc_evt_ring
 *
 * @evt_ring_hdl: Client handle previously obtained from
 *                gsi_alloc_evt_ring
 * @props: the properties to apply
 * @scr: the scratch info to apply
 *
 * @Return gsi_status
 */
int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
                struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
/**
 * gsi_write_channel_scratch - Peripheral should call this function to
 * write to the scratch area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_channel_scratch(unsigned long chan_hdl,
                union __packed gsi_channel_scratch val);
/**
 * gsi_write_channel_scratch3_reg - Peripheral should call this function to
 * write to the scratch3 reg area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_channel_scratch3_reg(unsigned long chan_hdl,
                union __packed gsi_wdi_channel_scratch3_reg val);
/**
 * gsi_write_channel_scratch2_reg - Peripheral should call this function to
 * write to the scratch2 reg area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_channel_scratch2_reg(unsigned long chan_hdl,
                union __packed gsi_wdi2_channel_scratch2_reg val);
/**
 * gsi_write_wdi3_channel_scratch2_reg - Peripheral should call this function
 * to write to the WDI3 scratch 2 register area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @val: Value to write
 *
 * @Return gsi_status
 */
int gsi_write_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
                union __packed gsi_wdi3_channel_scratch2_reg val);
/**
 * gsi_read_channel_scratch - Peripheral should call this function to
 * read from the scratch area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @val: Read value
 *
 * @Return gsi_status
 */
int gsi_read_channel_scratch(unsigned long chan_hdl,
                union __packed gsi_channel_scratch *val);
/**
 * gsi_read_wdi3_channel_scratch2_reg - Peripheral should call this function to
 * read from the WDI3 scratch 2 register area of the channel context
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @val: Read value
 *
 * @Return gsi_status
 */
int gsi_read_wdi3_channel_scratch2_reg(unsigned long chan_hdl,
                union __packed gsi_wdi3_channel_scratch2_reg *val);
/*
 * gsi_pending_irq_type - Peripheral should call this function to
 * check if there is any pending irq
 *
 * This function can sleep
 *
 * @Return gsi_irq_type
 */
int gsi_pending_irq_type(void);
/**
 * gsi_update_mhi_channel_scratch - MHI Peripheral should call this
 * function to update the scratch area of the channel context. The update
 * is performed read-modify-write, so non-SWI fields will not be
 * affected
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @mscr: MHI Channel Scratch value
 *
 * @Return gsi_status
 */
int gsi_update_mhi_channel_scratch(unsigned long chan_hdl,
                struct __packed gsi_mhi_channel_scratch mscr);
/**
 * gsi_stop_channel - Peripheral should call this function to
 * stop a channel. Stop will happen on a packet boundary
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
 *         other error codes for failure
 */
int gsi_stop_channel(unsigned long chan_hdl);
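/*
 * Illustrative sketch (not part of the API): stopping a channel with a bounded
 * retry loop, since gsi_stop_channel can return -GSI_STATUS_AGAIN while the
 * stop is still in progress. The retry count and delay are arbitrary example
 * values chosen by the client.
 *
 *      int retries = 5;
 *      int ret;
 *
 *      do {
 *              ret = gsi_stop_channel(chan_hdl);
 *              if (ret != -GSI_STATUS_AGAIN)
 *                      break;
 *              usleep_range(1000, 2000);       // let HW reach a packet boundary
 *      } while (--retries);
 */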
/**
 * gsi_stop_db_channel - Peripheral should call this function to
 * stop a channel when all transfer elements till the doorbell
 * have been processed
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 *
 * This function can sleep
 *
 * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
 *         other error codes for failure
 */
int gsi_stop_db_channel(unsigned long chan_hdl);
/**
 * gsi_query_channel_db_addr - Peripheral should call this function to
 * query the physical addresses of the channel doorbell registers
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @db_addr_wp_lsb: Physical address of doorbell register where the 32
 *                  LSBs of the doorbell value should be written
 * @db_addr_wp_msb: Physical address of doorbell register where the 32
 *                  MSBs of the doorbell value should be written
 *
 * @Return gsi_status
 */
int gsi_query_channel_db_addr(unsigned long chan_hdl,
                uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
/**
 * gsi_query_channel_info - Peripheral can call this function to query the
 * channel and associated event ring (if any) status.
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @info: Where to read the values into
 *
 * @Return gsi_status
 */
int gsi_query_channel_info(unsigned long chan_hdl,
                struct gsi_chan_info *info);
/**
 * gsi_is_channel_empty - Peripheral can call this function to query if
 * the channel is empty. This is only applicable to GPI. "Empty" means
 * GSI has consumed all descriptors for a TO_GSI channel and SW has
 * processed all completed descriptors for a FROM_GSI channel.
 *
 * @chan_hdl: Client handle previously obtained from gsi_alloc_channel
 * @is_empty: set by GSI based on channel emptiness
 *
 * @Return gsi_status
 */
int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);
/**
 * gsi_is_event_pending - Returns true if there is at least one event in the
 * provided event ring which wasn't processed.
 *
 * @chan_hdl: Client handle previously obtained from gsi_alloc_channel
 *
 * @Return true if an event is pending, else false
 */
bool gsi_is_event_pending(unsigned long chan_hdl);
/**
 * gsi_get_channel_cfg - This function returns the current config
 * of the specified channel
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @props: where to copy properties to
 * @scr: where to copy scratch info to
 *
 * @Return gsi_status
 */
int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
                union gsi_channel_scratch *scr);
/**
 * gsi_set_channel_cfg - This function applies the supplied config
 * to the specified channel
 *
 * ch_id and evt_ring_hdl of the channel cannot be changed after
 * gsi_alloc_channel
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @props: the properties to apply
 * @scr: the scratch info to apply
 *
 * @Return gsi_status
 */
int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
                union gsi_channel_scratch *scr);
/**
 * gsi_poll_n_channel - Peripheral should call this function to query for
 * completed transfer descriptors.
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 * @notify: Information about the completed transfer if any
 * @expected_num: Number of descriptors to poll for on each call.
 * @actual_num: Actual number of descriptors polled successfully.
 *
 * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
 * completed)
 */
int gsi_poll_n_channel(unsigned long chan_hdl,
                struct gsi_chan_xfer_notify *notify,
                int expected_num, int *actual_num);
/**
 * gsi_start_xfer - Peripheral should call this function to
 * inform HW about queued xfers
 *
 * @chan_hdl: Client handle previously obtained from
 *            gsi_alloc_channel
 *
 * @Return gsi_status
 */
int gsi_start_xfer(unsigned long chan_hdl);
/**
 * gsi_configure_regs - Peripheral should call this function
 * to configure the GSI registers before/after the FW is
 * loaded but before it is enabled.
 *
 * @per_base_addr: Base address of the peripheral using GSI
 * @ver: GSI core version
 *
 * @Return gsi_status
 */
int gsi_configure_regs(phys_addr_t per_base_addr, enum gsi_ver ver);
/**
 * gsi_enable_fw - Peripheral should call this function
 * to enable the GSI FW after the FW has been loaded to the SRAM.
 *
 * @gsi_base_addr: Base address of GSI register space
 * @gsi_size: Mapping size of the GSI register space
 * @ver: GSI core version
 * @Return gsi_status
 */
int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver);
/**
 * gsi_get_inst_ram_offset_and_size - Peripheral should call this function
 * to get instruction RAM base address offset and size. Peripheral typically
 * uses this info to load GSI FW into the IRAM.
 *
 * @base_offset: [OUT] - IRAM base offset address
 * @size: [OUT] - IRAM size
 * @ver: GSI core version
 * @Return none
 */
void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
                unsigned long *size, enum gsi_ver ver);
/**
 * gsi_halt_channel_ee - Peripheral should call this function
 * to stop another EE's channel. This is usually used in SSR cleanup.
 *
 * @chan_idx: Virtual channel index
 * @ee: EE
 * @code: [out] response code for operation
 * @Return gsi_status
 */
int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
/**
 * gsi_wdi3_write_evt_ring_db - write event ring doorbell address
 *
 * @chan_hdl: gsi channel handle
 * @db_addr_low: doorbell address, lower 32 bits
 * @db_addr_high: doorbell address, upper 32 bits
 * @Return none
 */
void gsi_wdi3_write_evt_ring_db(unsigned long chan_hdl, uint32_t db_addr_low,
                uint32_t db_addr_high);
/**
 * gsi_get_refetch_reg - get WP/RP value from re_fetch register
 *
 * @chan_hdl: gsi channel handle
 * @is_rp: rp or wp
 */
int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp);
/**
 * gsi_ntn3_client_stats_get - get ntn3 stats
 *
 * @ep_id: ep index
 * @scratch_id: scratch register number
 * @chan_hdl: gsi channel handle
 */
int gsi_ntn3_client_stats_get(unsigned ep_id, int scratch_id, unsigned chan_hdl);
/**
 * gsi_get_drop_stats - get drop stats by GSI
 *
 * @ep_id: ep index
 * @scratch_id: drop stats on which scratch register
 * @chan_hdl: gsi channel handle
 */
int gsi_get_drop_stats(unsigned long ep_id, int scratch_id,
                unsigned long chan_hdl);
/**
 * gsi_get_wp - get channel write pointer for stats
 *
 * @chan_hdl: gsi channel handle
 */
int gsi_get_wp(unsigned long chan_hdl);
/**
 * gsi_wdi3_dump_register - dump wdi3 related gsi registers
 *
 * @chan_hdl: gsi channel handle
 */
void gsi_wdi3_dump_register(unsigned long chan_hdl);
/**
 * gsi_map_base - Peripheral should call this function to configure
 * access to the GSI registers.
 * @gsi_base_addr: Base address of GSI register space
 * @gsi_size: Mapping size of the GSI register space
 * @ver: The appropriate GSI version enum
 *
 * @Return gsi_status
 */
int gsi_map_base(phys_addr_t gsi_base_addr, u32 gsi_size, enum gsi_ver ver);
/**
 * gsi_unmap_base - Peripheral should call this function to undo the
 * effects of gsi_map_base
 *
 * @Return gsi_status
 */
int gsi_unmap_base(void);
/**
 * gsi_map_virtual_ch_to_per_ep - Peripheral should call this function
 * to configure each GSI virtual channel with the per endpoint index.
 *
 * @ee: The ee to be used
 * @chan_num: The channel to be used
 * @per_ep_index: value to assign
 *
 * @Return gsi_status
 */
int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index);
/**
 * gsi_alloc_channel_ee - Peripheral should call this function
 * to allocate another EE's channel. This is usually done at bootup to
 * allocate all channels.
 *
 * @chan_idx: Virtual channel index
 * @ee: EE
 * @code: [out] response code for operation
 * @Return gsi_status
 */
int gsi_alloc_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
/**
 * gsi_enable_flow_control_ee - Peripheral should call this function
 * to enable flow control on another EE's channel. This is usually done in
 * USB connect and SSR scenarios.
 *
 * @chan_idx: Virtual channel index
 * @ee: EE
 * @code: [out] response code for operation
 * @Return gsi_status
 */
int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
                int *code);
/**
 * gsi_query_msi_addr - get gsi channel msi address
 *
 * @chan_hdl: channel handle
 * @addr: [out] channel msi address
 *
 * @Return gsi_status
 */
int gsi_query_msi_addr(unsigned long chan_hdl, phys_addr_t *addr);
/**
 * gsi_query_device_msi_addr - get gsi device msi address
 *
 * @addr: [out] msi address
 *
 * @Return gsi_status
 */
int gsi_query_device_msi_addr(u64 *addr);
/**
 * gsi_update_almst_empty_thrshold - update almst_empty_thrshold
 *
 * @chan_hdl: channel handle
 * @threshold: Threshold value for channel almost-empty indication to MCS.
 *
 */
void gsi_update_almst_empty_thrshold(unsigned long chan_hdl, unsigned short threshold);
/**
 * gsi_dump_ch_info - dump channel information.
 *
 * @chan_hdl: channel handle
 *
 * @Return void
 */
void gsi_dump_ch_info(unsigned long chan_hdl);
/**
 * gsi_get_hw_profiling_stats() - Query GSI HW profiling stats
 * @stats: [out] stats blob from client populated by driver
 *
 * Returns: 0 on success, negative on failure
 *
 */
int gsi_get_hw_profiling_stats(struct gsi_hw_profiling_data *stats);
/**
 * gsi_get_fw_version() - Query GSI FW version
 * @ver: [out] ver blob from client populated by driver
 *
 * Returns: 0 on success, negative on failure
 *
 */
int gsi_get_fw_version(struct gsi_fw_version *ver);
int gsi_flow_control_ee(unsigned int chan_idx, int ep_id, unsigned int ee,
                bool enable, bool prmy_scnd_fc, int *code);
int gsi_query_flow_control_state_ee(unsigned int chan_idx, unsigned int ee,
                bool prmy_scnd_fc, int *code);
/*
 * Here is a typical sequence of calls
 *
 * gsi_register_device
 *
 * gsi_write_device_scratch (if the protocol needs this)
 *
 * gsi_alloc_evt_ring (for as many event rings as needed)
 * gsi_write_evt_ring_scratch
 *
 * gsi_alloc_channel (for as many channels as needed; channels can have
 * no event ring, an exclusive event ring or a shared event ring)
 * gsi_write_channel_scratch
 * gsi_read_channel_scratch
 * gsi_start_channel
 * gsi_queue_xfer/gsi_start_xfer
 * gsi_config_channel_mode/gsi_poll_channel (if the client wants to poll on
 * xfer completions)
 * gsi_stop_db_channel/gsi_stop_channel
 *
 * gsi_dealloc_channel
 *
 * gsi_dealloc_evt_ring
 *
 * gsi_deregister_device
 *
 * An illustrative code sketch of this sequence follows below.
 */
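/*
 * Illustrative lifecycle sketch (not part of the API): one event ring and one
 * channel, following the call sequence described above. The property
 * structures are assumed to be filled per the gsi_per_props,
 * gsi_evt_ring_props and gsi_chan_props definitions earlier in this header;
 * error handling is omitted and a GSI_STATUS_SUCCESS (0) return is assumed on
 * success. The "evt_ring_hdl" member of gsi_chan_props is an assumption based
 * on the gsi_set_channel_cfg notes above; the exact member name is defined
 * earlier in this header.
 *
 *      unsigned long dev_hdl, evt_hdl, chan_hdl;
 *      struct gsi_per_props per_props = { 0 };         // filled by the client
 *      struct gsi_evt_ring_props evt_props = { 0 };    // filled by the client
 *      struct gsi_chan_props chan_props = { 0 };       // filled by the client
 *
 *      gsi_register_device(&per_props, &dev_hdl);
 *      gsi_alloc_evt_ring(&evt_props, dev_hdl, &evt_hdl);
 *      chan_props.evt_ring_hdl = evt_hdl;              // assumed member name
 *      gsi_alloc_channel(&chan_props, dev_hdl, &chan_hdl);
 *      gsi_start_channel(chan_hdl);
 *
 *      // ... gsi_queue_xfer()/gsi_poll_channel() while the channel is in use ...
 *
 *      gsi_stop_channel(chan_hdl);
 *      gsi_dealloc_channel(chan_hdl);
 *      gsi_dealloc_evt_ring(evt_hdl);
 *      gsi_deregister_device(dev_hdl, false);
 */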
/**
 * These APIs are mostly for the ipa_stats module
 */
uint64_t gsi_read_event_ring_wp(int evtr_id, int ee);
uint64_t gsi_read_event_ring_bp(int evt_hdl);
uint64_t gsi_get_evt_ring_rp(int evt_hdl);
uint64_t gsi_read_chan_ring_wp(int chan_id, int ee);
uint64_t gsi_read_chan_ring_rp(int chan_id, int ee);
uint64_t gsi_read_chan_ring_bp(int chan_hdl);
uint64_t gsi_read_chan_ring_re_fetch_wp(int chan_id, int ee);
enum gsi_chan_prot gsi_get_chan_prot_type(int chan_hdl);
enum gsi_chan_state gsi_get_chan_state(int chan_hdl);
int gsi_get_chan_poll_mode(int chan_hdl);
uint32_t gsi_get_ring_len(int chan_hdl);
uint8_t gsi_get_chan_props_db_in_bytes(int chan_hdl);
enum gsi_evt_ring_elem_size gsi_get_evt_ring_re_size(int evt_hdl);
uint32_t gsi_get_evt_ring_len(int evt_hdl);
int gsi_get_peripheral_ee(void);
uint32_t gsi_get_chan_stop_stm(int chan_id, int ee);
#endif