gsi.c
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  3. * Copyright (C) 2018-2022 Linaro Ltd.
  4. */
  5. #include <linux/types.h>
  6. #include <linux/bits.h>
  7. #include <linux/bitfield.h>
  8. #include <linux/mutex.h>
  9. #include <linux/completion.h>
  10. #include <linux/io.h>
  11. #include <linux/bug.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/netdevice.h>
  15. #include "gsi.h"
  16. #include "gsi_reg.h"
  17. #include "gsi_private.h"
  18. #include "gsi_trans.h"
  19. #include "ipa_gsi.h"
  20. #include "ipa_data.h"
  21. #include "ipa_version.h"
  22. /**
  23. * DOC: The IPA Generic Software Interface
  24. *
  25. * The generic software interface (GSI) is an integral component of the IPA,
  26. * providing a well-defined communication layer between the AP subsystem
  27. * and the IPA core. The modem uses the GSI layer as well.
  28. *
  29. *   --------             ---------
  30. *   |      |             |       |
  31. *   | AP   +<---.   .----+ Modem |
  32. *   |      +--. |   | .->+       |
  33. *   |      |  | |   | |  |       |
  34. *   --------  | |   | |  ---------
  35. *             v |   v |
  36. *           --+-+---+-+--
  37. *           |    GSI    |
  38. *           |-----------|
  39. *           |           |
  40. *           |    IPA    |
  41. *           |           |
  42. *           -------------
  43. *
  44. * In the above diagram, the AP and Modem represent "execution environments"
  45. * (EEs), which are independent operating environments that use the IPA for
  46. * data transfer.
  47. *
  48. * Each EE uses a set of unidirectional GSI "channels," which allow transfer
  49. * of data to or from the IPA. A channel is implemented as a ring buffer,
  50. * with a DRAM-resident array of "transfer elements" (TREs) available to
  51. * describe transfers to or from other EEs through the IPA. A transfer
  52. * element can also contain an immediate command, requesting the IPA perform
  53. * actions other than data transfer.
  54. *
  55. * Each TRE refers to a block of data--also located in DRAM. After writing
  56. * one or more TREs to a channel, the writer (either the IPA or an EE) writes
  57. * a doorbell register to inform the receiving side how many elements have
  58. * been written.
  59. *
  60. * Each channel has a GSI "event ring" associated with it. An event ring
  61. * is implemented very much like a channel ring, but is always directed from
  62. * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
  63. * events by adding an entry to the event ring associated with the channel.
  64. * The GSI then writes its doorbell for the event ring, causing the target
  65. * EE to be interrupted. Each entry in an event ring contains a pointer
  66. * to the channel TRE whose completion the event represents.
  67. *
  68. * Each TRE in a channel ring has a set of flags. One flag indicates whether
  69. * the completion of the transfer operation generates an entry (and possibly
  70. * an interrupt) in the channel's event ring. Other flags allow transfer
  71. * elements to be chained together, forming a single logical transaction.
  72. * TRE flags are used to control whether and when interrupts are generated
  73. * to signal completion of channel transfers.
  74. *
  75. * Elements in channel and event rings are completed (or consumed) strictly
  76. * in order. Completion of one entry implies the completion of all preceding
  77. * entries. A single completion interrupt can therefore communicate the
  78. * completion of many transfers.
  79. *
  80. * Note that all GSI registers are little-endian, which is the assumed
  81. * endianness of I/O space accesses. The accessor functions perform byte
  82. * swapping if needed (i.e., for a big endian CPU).
  83. */
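/* Illustrative sketch (not part of the driver): a minimal model of the ring
 * and doorbell bookkeeping described in the DOC comment above.  The names
 * below (example_ring_state, example_ring_publish, example_ring_available)
 * are hypothetical and exist only to make the producer/consumer indexing
 * concrete; the real ring state is kept in struct gsi_ring.
 */
struct example_ring_state {
	u32 count;		/* number of ring elements (a power of 2) */
	u32 writer_index;	/* next element the producer will fill */
	u32 reader_index;	/* next element the consumer will process */
};

/* Producer side: after filling @n elements, "ring the doorbell" by publishing
 * the slot of the last element written.  Because elements complete strictly
 * in order, a single doorbell (or completion event) covers every element
 * written before it.
 */
static u32 example_ring_publish(struct example_ring_state *ring, u32 n)
{
	ring->writer_index += n;	/* free-running index */

	return (ring->writer_index - 1) % ring->count;
}

/* Consumer side: how many filled elements are waiting to be processed */
static u32 example_ring_available(const struct example_ring_state *ring)
{
	return ring->writer_index - ring->reader_index;
}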
  84. /* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
  85. #define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
  86. #define GSI_CMD_TIMEOUT 50 /* milliseconds */
  87. #define GSI_CHANNEL_STOP_RETRIES 10
  88. #define GSI_CHANNEL_MODEM_HALT_RETRIES 10
  89. #define GSI_CHANNEL_MODEM_FLOW_RETRIES 5 /* disable flow control only */
  90. #define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
  91. #define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
  92. #define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */
  93. /* An entry in an event ring */
  94. struct gsi_event {
  95. __le64 xfer_ptr;
  96. __le16 len;
  97. u8 reserved1;
  98. u8 code;
  99. __le16 reserved2;
  100. u8 type;
  101. u8 chid;
  102. };
  103. /** gsi_channel_scratch_gpi - GPI protocol scratch register
  104. * @max_outstanding_tre:
  105. * Defines the maximum number of TREs allowed in a single transaction
  106. * on a channel, expressed in bytes. This determines the amount of prefetch
  107. * performed by the hardware. We configure this to equal the size of
  108. * the TLV FIFO for the channel.
  109. * @outstanding_threshold:
  110. * Defines the threshold (in bytes) determining when the sequencer
  111. * should update the channel doorbell. We configure this to equal
  112. * the size of two TREs.
  113. */
  114. struct gsi_channel_scratch_gpi {
  115. u64 reserved1;
  116. u16 reserved2;
  117. u16 max_outstanding_tre;
  118. u16 reserved3;
  119. u16 outstanding_threshold;
  120. };
  121. /** gsi_channel_scratch - channel scratch configuration area
  122. *
  123. * The exact interpretation of this register is protocol-specific.
  124. * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
  125. */
  126. union gsi_channel_scratch {
  127. struct gsi_channel_scratch_gpi gpi;
  128. struct {
  129. u32 word1;
  130. u32 word2;
  131. u32 word3;
  132. u32 word4;
  133. } data;
  134. };
  135. /* Check things that can be validated at build time. */
  136. static void gsi_validate_build(void)
  137. {
  138. /* This is used as a divisor */
  139. BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);
  140. /* Code assumes the size of channel and event ring element are
  141. * the same (and fixed). Make sure the size of an event ring
  142. * element is what's expected.
  143. */
  144. BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);
  145. /* Hardware requires a 2^n ring size. We ensure the number of
  146. * elements in an event ring is a power of 2 elsewhere; this
  147. * ensure the elements themselves meet the requirement.
  148. */
  149. BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
  150. /* The channel element size must fit in this field */
  151. BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));
  152. /* The event ring element size must fit in this field */
  153. BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
  154. }
  155. /* Return the channel id associated with a given channel */
  156. static u32 gsi_channel_id(struct gsi_channel *channel)
  157. {
  158. return channel - &channel->gsi->channel[0];
  159. }
  160. /* An initialized channel has a non-null GSI pointer */
  161. static bool gsi_channel_initialized(struct gsi_channel *channel)
  162. {
  163. return !!channel->gsi;
  164. }
  165. /* Update the GSI IRQ type register with the cached value */
  166. static void gsi_irq_type_update(struct gsi *gsi, u32 val)
  167. {
  168. gsi->type_enabled_bitmap = val;
  169. iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
  170. }
  171. static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
  172. {
  173. gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
  174. }
  175. static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
  176. {
  177. gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
  178. }
  179. /* Event ring commands are performed one at a time. Their completion
  180. * is signaled by the event ring control GSI interrupt type, which is
  181. * only enabled when we issue an event ring command. Only the event
  182. * ring being operated on has this interrupt enabled.
  183. */
  184. static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
  185. {
  186. u32 val = BIT(evt_ring_id);
  187. /* There's a small chance that a previous command completed
  188. * after the interrupt was disabled, so make sure we have no
  189. * pending interrupts before we enable them.
  190. */
  191. iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
  192. iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
  193. gsi_irq_type_enable(gsi, GSI_EV_CTRL);
  194. }
  195. /* Disable event ring control interrupts */
  196. static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
  197. {
  198. gsi_irq_type_disable(gsi, GSI_EV_CTRL);
  199. iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
  200. }
  201. /* Channel commands are performed one at a time. Their completion is
  202. * signaled by the channel control GSI interrupt type, which is only
  203. * enabled when we issue a channel command. Only the channel being
  204. * operated on has this interrupt enabled.
  205. */
  206. static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
  207. {
  208. u32 val = BIT(channel_id);
  209. /* There's a small chance that a previous command completed
  210. * after the interrupt was disabled, so make sure we have no
  211. * pending interrupts before we enable them.
  212. */
  213. iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
  214. iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
  215. gsi_irq_type_enable(gsi, GSI_CH_CTRL);
  216. }
  217. /* Disable channel control interrupts */
  218. static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
  219. {
  220. gsi_irq_type_disable(gsi, GSI_CH_CTRL);
  221. iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
  222. }
  223. static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
  224. {
  225. bool enable_ieob = !gsi->ieob_enabled_bitmap;
  226. u32 val;
  227. gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
  228. val = gsi->ieob_enabled_bitmap;
  229. iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
  230. /* Enable the interrupt type if this is the first channel enabled */
  231. if (enable_ieob)
  232. gsi_irq_type_enable(gsi, GSI_IEOB);
  233. }
  234. static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
  235. {
  236. u32 val;
  237. gsi->ieob_enabled_bitmap &= ~event_mask;
  238. /* Disable the interrupt type if this was the last enabled channel */
  239. if (!gsi->ieob_enabled_bitmap)
  240. gsi_irq_type_disable(gsi, GSI_IEOB);
  241. val = gsi->ieob_enabled_bitmap;
  242. iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
  243. }
  244. static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
  245. {
  246. gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
  247. }
  248. /* Enable all GSI interrupt types */
  249. static void gsi_irq_enable(struct gsi *gsi)
  250. {
  251. u32 val;
  252. /* Global interrupts include hardware error reports. Enable
  253. * that so we can at least report the error should it occur.
  254. */
  255. iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
  256. gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));
  257. /* General GSI interrupts are reported to all EEs; if they occur
  258. * they are unrecoverable (without reset). A breakpoint interrupt
  259. * also exists, but we don't support that. We want to be notified
  260. * of errors so we can report them, even if they can't be handled.
  261. */
  262. val = BIT(BUS_ERROR);
  263. val |= BIT(CMD_FIFO_OVRFLOW);
  264. val |= BIT(MCS_STACK_OVRFLOW);
  265. iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
  266. gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
  267. }
  268. /* Disable all GSI interrupt types */
  269. static void gsi_irq_disable(struct gsi *gsi)
  270. {
  271. gsi_irq_type_update(gsi, 0);
  272. /* Clear the type-specific interrupt masks set by gsi_irq_enable() */
  273. iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
  274. iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
  275. }
  276. /* Return the virtual address associated with a ring index */
  277. void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
  278. {
  279. /* Note: index *must* be used modulo the ring count here */
  280. return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
  281. }
  282. /* Return the 32-bit DMA address associated with a ring index */
  283. static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
  284. {
  285. return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
  286. }
  287. /* Return the ring index of a 32-bit ring offset */
  288. static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
  289. {
  290. return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
  291. }
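/* Worked example (illustrative only, not driver code): gsi_ring_addr() and
 * gsi_ring_index() are inverses of each other.  With the 16-byte element
 * size implied by struct gsi_event and a ring based at DMA address 0x1000,
 * index 3 maps to offset 0x1000 + 3 * 16 = 0x1030, and 0x1030 maps back to
 * index 3.  The hypothetical helper below simply restates that round-trip
 * identity.
 */
static bool example_ring_round_trip(struct gsi_ring *ring, u32 index)
{
	u32 offset = gsi_ring_addr(ring, index);

	return gsi_ring_index(ring, offset) == index;
}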
  292. /* Issue a GSI command by writing a value to a register, then wait for
  293. * completion to be signaled. Returns true if the command completes
  294. * or false if it times out.
  295. */
  296. static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
  297. {
  298. unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
  299. struct completion *completion = &gsi->completion;
  300. reinit_completion(completion);
  301. iowrite32(val, gsi->virt + reg);
  302. return !!wait_for_completion_timeout(completion, timeout);
  303. }
  304. /* Return the hardware's notion of the current state of an event ring */
  305. static enum gsi_evt_ring_state
  306. gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
  307. {
  308. u32 val;
  309. val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
  310. return u32_get_bits(val, EV_CHSTATE_FMASK);
  311. }
  312. /* Issue an event ring command and wait for it to complete */
  313. static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
  314. enum gsi_evt_cmd_opcode opcode)
  315. {
  316. struct device *dev = gsi->dev;
  317. bool timeout;
  318. u32 val;
  319. /* Enable the completion interrupt for the command */
  320. gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
  321. val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
  322. val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
  323. timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
  324. gsi_irq_ev_ctrl_disable(gsi);
  325. if (!timeout)
  326. return;
  327. dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
  328. opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
  329. }
  330. /* Allocate an event ring in NOT_ALLOCATED state */
  331. static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
  332. {
  333. enum gsi_evt_ring_state state;
  334. /* Get initial event ring state */
  335. state = gsi_evt_ring_state(gsi, evt_ring_id);
  336. if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
  337. dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
  338. evt_ring_id, state);
  339. return -EINVAL;
  340. }
  341. gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
  342. /* If successful the event ring state will have changed */
  343. state = gsi_evt_ring_state(gsi, evt_ring_id);
  344. if (state == GSI_EVT_RING_STATE_ALLOCATED)
  345. return 0;
  346. dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
  347. evt_ring_id, state);
  348. return -EIO;
  349. }
  350. /* Reset a GSI event ring in ALLOCATED or ERROR state. */
  351. static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
  352. {
  353. enum gsi_evt_ring_state state;
  354. state = gsi_evt_ring_state(gsi, evt_ring_id);
  355. if (state != GSI_EVT_RING_STATE_ALLOCATED &&
  356. state != GSI_EVT_RING_STATE_ERROR) {
  357. dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
  358. evt_ring_id, state);
  359. return;
  360. }
  361. gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
  362. /* If successful the event ring state will have changed */
  363. state = gsi_evt_ring_state(gsi, evt_ring_id);
  364. if (state == GSI_EVT_RING_STATE_ALLOCATED)
  365. return;
  366. dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
  367. evt_ring_id, state);
  368. }
  369. /* Issue a hardware de-allocation request for an allocated event ring */
  370. static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
  371. {
  372. enum gsi_evt_ring_state state;
  373. state = gsi_evt_ring_state(gsi, evt_ring_id);
  374. if (state != GSI_EVT_RING_STATE_ALLOCATED) {
  375. dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
  376. evt_ring_id, state);
  377. return;
  378. }
  379. gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
  380. /* If successful the event ring state will have changed */
  381. state = gsi_evt_ring_state(gsi, evt_ring_id);
  382. if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
  383. return;
  384. dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
  385. evt_ring_id, state);
  386. }
  387. /* Fetch the current state of a channel from hardware */
  388. static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
  389. {
  390. u32 channel_id = gsi_channel_id(channel);
  391. void __iomem *virt = channel->gsi->virt;
  392. u32 val;
  393. val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
  394. return u32_get_bits(val, CHSTATE_FMASK);
  395. }
  396. /* Issue a channel command and wait for it to complete */
  397. static void
  398. gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
  399. {
  400. u32 channel_id = gsi_channel_id(channel);
  401. struct gsi *gsi = channel->gsi;
  402. struct device *dev = gsi->dev;
  403. bool timeout;
  404. u32 val;
  405. /* Enable the completion interrupt for the command */
  406. gsi_irq_ch_ctrl_enable(gsi, channel_id);
  407. val = u32_encode_bits(channel_id, CH_CHID_FMASK);
  408. val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
  409. timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
  410. gsi_irq_ch_ctrl_disable(gsi);
  411. if (!timeout)
  412. return;
  413. dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
  414. opcode, channel_id, gsi_channel_state(channel));
  415. }
  416. /* Allocate GSI channel in NOT_ALLOCATED state */
  417. static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
  418. {
  419. struct gsi_channel *channel = &gsi->channel[channel_id];
  420. struct device *dev = gsi->dev;
  421. enum gsi_channel_state state;
  422. /* Get initial channel state */
  423. state = gsi_channel_state(channel);
  424. if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
  425. dev_err(dev, "channel %u bad state %u before alloc\n",
  426. channel_id, state);
  427. return -EINVAL;
  428. }
  429. gsi_channel_command(channel, GSI_CH_ALLOCATE);
  430. /* If successful the channel state will have changed */
  431. state = gsi_channel_state(channel);
  432. if (state == GSI_CHANNEL_STATE_ALLOCATED)
  433. return 0;
  434. dev_err(dev, "channel %u bad state %u after alloc\n",
  435. channel_id, state);
  436. return -EIO;
  437. }
  438. /* Start an ALLOCATED channel */
  439. static int gsi_channel_start_command(struct gsi_channel *channel)
  440. {
  441. struct device *dev = channel->gsi->dev;
  442. enum gsi_channel_state state;
  443. state = gsi_channel_state(channel);
  444. if (state != GSI_CHANNEL_STATE_ALLOCATED &&
  445. state != GSI_CHANNEL_STATE_STOPPED) {
  446. dev_err(dev, "channel %u bad state %u before start\n",
  447. gsi_channel_id(channel), state);
  448. return -EINVAL;
  449. }
  450. gsi_channel_command(channel, GSI_CH_START);
  451. /* If successful the channel state will have changed */
  452. state = gsi_channel_state(channel);
  453. if (state == GSI_CHANNEL_STATE_STARTED)
  454. return 0;
  455. dev_err(dev, "channel %u bad state %u after start\n",
  456. gsi_channel_id(channel), state);
  457. return -EIO;
  458. }
  459. /* Stop a GSI channel in STARTED state */
  460. static int gsi_channel_stop_command(struct gsi_channel *channel)
  461. {
  462. struct device *dev = channel->gsi->dev;
  463. enum gsi_channel_state state;
  464. state = gsi_channel_state(channel);
  465. /* Channel could have entered STOPPED state since last call
  466. * if it timed out. If so, we're done.
  467. */
  468. if (state == GSI_CHANNEL_STATE_STOPPED)
  469. return 0;
  470. if (state != GSI_CHANNEL_STATE_STARTED &&
  471. state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
  472. dev_err(dev, "channel %u bad state %u before stop\n",
  473. gsi_channel_id(channel), state);
  474. return -EINVAL;
  475. }
  476. gsi_channel_command(channel, GSI_CH_STOP);
  477. /* If successful the channel state will have changed */
  478. state = gsi_channel_state(channel);
  479. if (state == GSI_CHANNEL_STATE_STOPPED)
  480. return 0;
  481. /* We may have to try again if stop is in progress */
  482. if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
  483. return -EAGAIN;
  484. dev_err(dev, "channel %u bad state %u after stop\n",
  485. gsi_channel_id(channel), state);
  486. return -EIO;
  487. }
  488. /* Reset a GSI channel in ALLOCATED or ERROR state. */
  489. static void gsi_channel_reset_command(struct gsi_channel *channel)
  490. {
  491. struct device *dev = channel->gsi->dev;
  492. enum gsi_channel_state state;
  493. /* A short delay is required before a RESET command */
  494. usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
  495. state = gsi_channel_state(channel);
  496. if (state != GSI_CHANNEL_STATE_STOPPED &&
  497. state != GSI_CHANNEL_STATE_ERROR) {
  498. /* No need to reset a channel already in ALLOCATED state */
  499. if (state != GSI_CHANNEL_STATE_ALLOCATED)
  500. dev_err(dev, "channel %u bad state %u before reset\n",
  501. gsi_channel_id(channel), state);
  502. return;
  503. }
  504. gsi_channel_command(channel, GSI_CH_RESET);
  505. /* If successful the channel state will have changed */
  506. state = gsi_channel_state(channel);
  507. if (state != GSI_CHANNEL_STATE_ALLOCATED)
  508. dev_err(dev, "channel %u bad state %u after reset\n",
  509. gsi_channel_id(channel), state);
  510. }
  511. /* Deallocate an ALLOCATED GSI channel */
  512. static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
  513. {
  514. struct gsi_channel *channel = &gsi->channel[channel_id];
  515. struct device *dev = gsi->dev;
  516. enum gsi_channel_state state;
  517. state = gsi_channel_state(channel);
  518. if (state != GSI_CHANNEL_STATE_ALLOCATED) {
  519. dev_err(dev, "channel %u bad state %u before dealloc\n",
  520. channel_id, state);
  521. return;
  522. }
  523. gsi_channel_command(channel, GSI_CH_DE_ALLOC);
  524. /* If successful the channel state will have changed */
  525. state = gsi_channel_state(channel);
  526. if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
  527. dev_err(dev, "channel %u bad state %u after dealloc\n",
  528. channel_id, state);
  529. }
  530. /* Ring an event ring doorbell, reporting the last entry processed by the AP.
  531. * The index argument (modulo the ring count) is the first unfilled entry, so
  532. * we supply one less than that with the doorbell. Update the event ring
  533. * index field with the value provided.
  534. */
  535. static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
  536. {
  537. struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
  538. u32 val;
  539. ring->index = index; /* Next unused entry */
  540. /* Note: index *must* be used modulo the ring count here */
  541. val = gsi_ring_addr(ring, (index - 1) % ring->count);
  542. iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
  543. }
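/* Worked example (illustrative): with an 8-element event ring, a "first
 * unfilled" index of 5 means element 4 was the last one processed, so the
 * doorbell is written with the address of element 4.  When index is 0 the
 * unsigned subtraction wraps to 0xffffffff, and because the ring count is a
 * power of 2 the modulo still selects the last element (7), as intended.
 */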
  544. /* Program an event ring for use */
  545. static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
  546. {
  547. struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
  548. struct gsi_ring *ring = &evt_ring->ring;
  549. size_t size;
  550. u32 val;
  551. /* We program all event rings as GPI type/protocol */
  552. val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
  553. val |= EV_INTYPE_FMASK;
  554. val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
  555. iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
  556. size = ring->count * GSI_RING_ELEMENT_SIZE;
  557. val = ev_r_length_encoded(gsi->version, size);
  558. iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
  559. /* The context 2 and 3 registers store the low-order and
  560. * high-order 32 bits of the address of the event ring,
  561. * respectively.
  562. */
  563. val = lower_32_bits(ring->addr);
  564. iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
  565. val = upper_32_bits(ring->addr);
  566. iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
  567. /* Enable interrupt moderation by setting the moderation delay */
  568. val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
  569. val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */
  570. iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
  571. /* No MSI write data, and the MSI high and low addresses are 0 */
  572. iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
  573. iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
  574. iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
  575. /* We don't need to get event read pointer updates */
  576. iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
  577. iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
  578. /* Finally, tell the hardware our "last processed" event (arbitrary) */
  579. gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
  580. }
  581. /* Find the transaction whose completion indicates a channel is quiesced */
  582. static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
  583. {
  584. struct gsi_trans_info *trans_info = &channel->trans_info;
  585. u32 pending_id = trans_info->pending_id;
  586. struct gsi_trans *trans;
  587. u16 trans_id;
  588. if (channel->toward_ipa && pending_id != trans_info->free_id) {
  589. /* There is a small chance a TX transaction got allocated
  590. * just before we disabled transmits, so check for that.
  591. * The last allocated, committed, or pending transaction
  592. * precedes the first free transaction.
  593. */
  594. trans_id = trans_info->free_id - 1;
  595. } else if (trans_info->polled_id != pending_id) {
  596. /* Otherwise (TX or RX) we want to wait for anything that
  597. * has completed, or has been polled but not released yet.
  598. *
  599. * The last completed or polled transaction precedes the
  600. * first pending transaction.
  601. */
  602. trans_id = pending_id - 1;
  603. } else {
  604. return NULL;
  605. }
  606. /* Caller will wait for this, so take a reference */
  607. trans = &trans_info->trans[trans_id % channel->tre_count];
  608. refcount_inc(&trans->refcount);
  609. return trans;
  610. }
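/* Worked example (illustrative): transaction IDs are free-running, so with
 * free_id == 21 and pending_id == 18 on a TX channel that still has
 * transactions allocated, the function above selects ID 20 (free_id - 1),
 * the most recently allocated transaction.  Otherwise, if polled_id differs
 * from pending_id, it selects ID 17 (pending_id - 1), the last transaction
 * that has completed or been polled.  Either ID is mapped into the
 * transaction array modulo the channel's TRE count.
 */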
  611. /* Wait for transaction activity on a channel to complete */
  612. static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
  613. {
  614. struct gsi_trans *trans;
  615. /* Get the last transaction, and wait for it to complete */
  616. trans = gsi_channel_trans_last(channel);
  617. if (trans) {
  618. wait_for_completion(&trans->completion);
  619. gsi_trans_free(trans);
  620. }
  621. }
  622. /* Program a channel for use; there is no gsi_channel_deprogram() */
  623. static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
  624. {
  625. size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
  626. u32 channel_id = gsi_channel_id(channel);
  627. union gsi_channel_scratch scr = { };
  628. struct gsi_channel_scratch_gpi *gpi;
  629. struct gsi *gsi = channel->gsi;
  630. u32 wrr_weight = 0;
  631. u32 val;
  632. /* We program all channels as GPI type/protocol */
  633. val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
  634. if (channel->toward_ipa)
  635. val |= CHTYPE_DIR_FMASK;
  636. val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
  637. val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
  638. iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
  639. val = r_length_encoded(gsi->version, size);
  640. iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
  641. /* The context 2 and 3 registers store the low-order and
  642. * high-order 32 bits of the address of the channel ring,
  643. * respectively.
  644. */
  645. val = lower_32_bits(channel->tre_ring.addr);
  646. iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
  647. val = upper_32_bits(channel->tre_ring.addr);
  648. iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
  649. /* Command channel gets low weighted round-robin priority */
  650. if (channel->command)
  651. wrr_weight = field_max(WRR_WEIGHT_FMASK);
  652. val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);
  653. /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
  654. /* No need to use the doorbell engine starting at IPA v4.0 */
  655. if (gsi->version < IPA_VERSION_4_0 && doorbell)
  656. val |= USE_DB_ENG_FMASK;
  657. /* v4.0 introduces an escape buffer for prefetch. We use it
  658. * on all but the AP command channel.
  659. */
  660. if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
  661. /* If not otherwise set, prefetch buffers are used */
  662. if (gsi->version < IPA_VERSION_4_5)
  663. val |= USE_ESCAPE_BUF_ONLY_FMASK;
  664. else
  665. val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
  666. PREFETCH_MODE_FMASK);
  667. }
  668. /* All channels set DB_IN_BYTES */
  669. if (gsi->version >= IPA_VERSION_4_9)
  670. val |= DB_IN_BYTES;
  671. iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
  672. /* Now update the scratch registers for GPI protocol */
  673. gpi = &scr.gpi;
  674. gpi->max_outstanding_tre = channel->trans_tre_max *
  675. GSI_RING_ELEMENT_SIZE;
  676. gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
  677. val = scr.data.word1;
  678. iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
  679. val = scr.data.word2;
  680. iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
  681. val = scr.data.word3;
  682. iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
  683. /* We must preserve the low-order 16 bits of the last scratch register.
  684. * The next sequence assumes those bits remain unchanged between the
  685. * read and the write.
  686. */
  687. val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
  688. val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
  689. iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
  690. /* All done! */
  691. }
  692. static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
  693. {
  694. struct gsi *gsi = channel->gsi;
  695. int ret;
  696. /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
  697. if (resume && gsi->version < IPA_VERSION_4_0)
  698. return 0;
  699. mutex_lock(&gsi->mutex);
  700. ret = gsi_channel_start_command(channel);
  701. mutex_unlock(&gsi->mutex);
  702. return ret;
  703. }
  704. /* Start an allocated GSI channel */
  705. int gsi_channel_start(struct gsi *gsi, u32 channel_id)
  706. {
  707. struct gsi_channel *channel = &gsi->channel[channel_id];
  708. int ret;
  709. /* Enable NAPI and the completion interrupt */
  710. napi_enable(&channel->napi);
  711. gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
  712. ret = __gsi_channel_start(channel, false);
  713. if (ret) {
  714. gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
  715. napi_disable(&channel->napi);
  716. }
  717. return ret;
  718. }
  719. static int gsi_channel_stop_retry(struct gsi_channel *channel)
  720. {
  721. u32 retries = GSI_CHANNEL_STOP_RETRIES;
  722. int ret;
  723. do {
  724. ret = gsi_channel_stop_command(channel);
  725. if (ret != -EAGAIN)
  726. break;
  727. usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
  728. } while (retries--);
  729. return ret;
  730. }
  731. static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
  732. {
  733. struct gsi *gsi = channel->gsi;
  734. int ret;
  735. /* Wait for any underway transactions to complete before stopping. */
  736. gsi_channel_trans_quiesce(channel);
  737. /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
  738. if (suspend && gsi->version < IPA_VERSION_4_0)
  739. return 0;
  740. mutex_lock(&gsi->mutex);
  741. ret = gsi_channel_stop_retry(channel);
  742. mutex_unlock(&gsi->mutex);
  743. return ret;
  744. }
  745. /* Stop a started channel */
  746. int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
  747. {
  748. struct gsi_channel *channel = &gsi->channel[channel_id];
  749. int ret;
  750. ret = __gsi_channel_stop(channel, false);
  751. if (ret)
  752. return ret;
  753. /* Disable the completion interrupt and NAPI if successful */
  754. gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
  755. napi_disable(&channel->napi);
  756. return 0;
  757. }
  758. /* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
  759. void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
  760. {
  761. struct gsi_channel *channel = &gsi->channel[channel_id];
  762. mutex_lock(&gsi->mutex);
  763. gsi_channel_reset_command(channel);
  764. /* Due to a hardware quirk we may need to reset RX channels twice. */
  765. if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
  766. gsi_channel_reset_command(channel);
  767. /* Hardware assumes this is 0 following reset */
  768. channel->tre_ring.index = 0;
  769. gsi_channel_program(channel, doorbell);
  770. gsi_channel_trans_cancel_pending(channel);
  771. mutex_unlock(&gsi->mutex);
  772. }
  773. /* Stop a started channel for suspend */
  774. int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
  775. {
  776. struct gsi_channel *channel = &gsi->channel[channel_id];
  777. int ret;
  778. ret = __gsi_channel_stop(channel, true);
  779. if (ret)
  780. return ret;
  781. /* Ensure NAPI polling has finished. */
  782. napi_synchronize(&channel->napi);
  783. return 0;
  784. }
  785. /* Resume a suspended channel (starting if stopped) */
  786. int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
  787. {
  788. struct gsi_channel *channel = &gsi->channel[channel_id];
  789. return __gsi_channel_start(channel, true);
  790. }
  791. /* Prevent all GSI interrupts while suspended */
  792. void gsi_suspend(struct gsi *gsi)
  793. {
  794. disable_irq(gsi->irq);
  795. }
  796. /* Allow all GSI interrupts again when resuming */
  797. void gsi_resume(struct gsi *gsi)
  798. {
  799. enable_irq(gsi->irq);
  800. }
  801. void gsi_trans_tx_committed(struct gsi_trans *trans)
  802. {
  803. struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
  804. channel->trans_count++;
  805. channel->byte_count += trans->len;
  806. trans->trans_count = channel->trans_count;
  807. trans->byte_count = channel->byte_count;
  808. }
  809. void gsi_trans_tx_queued(struct gsi_trans *trans)
  810. {
  811. u32 channel_id = trans->channel_id;
  812. struct gsi *gsi = trans->gsi;
  813. struct gsi_channel *channel;
  814. u32 trans_count;
  815. u32 byte_count;
  816. channel = &gsi->channel[channel_id];
  817. byte_count = channel->byte_count - channel->queued_byte_count;
  818. trans_count = channel->trans_count - channel->queued_trans_count;
  819. channel->queued_byte_count = channel->byte_count;
  820. channel->queued_trans_count = channel->trans_count;
  821. ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
  822. }
  823. /**
  824. * gsi_trans_tx_completed() - Report completed TX transactions
  825. * @trans: TX channel transaction that has completed
  826. *
  827. * Report that a transaction on a TX channel has completed. At the time a
  828. * transaction is committed, we record *in the transaction* its channel's
  829. * committed transaction and byte counts. Transactions are completed in
  830. * order, and the difference between the channel's byte/transaction count
  831. * when the transaction was committed and when it completes tells us
  832. * exactly how much data has been transferred while the transaction was
  833. * pending.
  834. *
  835. * We report this information to the network stack, which uses it to manage
  836. * the rate at which data is sent to hardware.
  837. */
  838. static void gsi_trans_tx_completed(struct gsi_trans *trans)
  839. {
  840. u32 channel_id = trans->channel_id;
  841. struct gsi *gsi = trans->gsi;
  842. struct gsi_channel *channel;
  843. u32 trans_count;
  844. u32 byte_count;
  845. channel = &gsi->channel[channel_id];
  846. trans_count = trans->trans_count - channel->compl_trans_count;
  847. byte_count = trans->byte_count - channel->compl_byte_count;
  848. channel->compl_trans_count += trans_count;
  849. channel->compl_byte_count += byte_count;
  850. ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
  851. }
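/* Worked example (illustrative): suppose a channel has committed 10
 * transactions totalling 4000 bytes, and the last completion reported
 * covered 7 transactions and 2500 bytes.  A completing transaction that
 * recorded trans_count == 10 and byte_count == 4000 therefore reports
 * 10 - 7 = 3 transactions and 4000 - 2500 = 1500 bytes to
 * ipa_gsi_channel_tx_completed(), and the channel's compl_trans_count and
 * compl_byte_count advance to 10 and 4000 to match.
 */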
  852. /* Channel control interrupt handler */
  853. static void gsi_isr_chan_ctrl(struct gsi *gsi)
  854. {
  855. u32 channel_mask;
  856. channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
  857. iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
  858. while (channel_mask) {
  859. u32 channel_id = __ffs(channel_mask);
  860. channel_mask ^= BIT(channel_id);
  861. complete(&gsi->completion);
  862. }
  863. }
  864. /* Event ring control interrupt handler */
  865. static void gsi_isr_evt_ctrl(struct gsi *gsi)
  866. {
  867. u32 event_mask;
  868. event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
  869. iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
  870. while (event_mask) {
  871. u32 evt_ring_id = __ffs(event_mask);
  872. event_mask ^= BIT(evt_ring_id);
  873. complete(&gsi->completion);
  874. }
  875. }
  876. /* Global channel error interrupt handler */
  877. static void
  878. gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
  879. {
  880. if (code == GSI_OUT_OF_RESOURCES) {
  881. dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
  882. complete(&gsi->completion);
  883. return;
  884. }
  885. /* Report, but otherwise ignore all other error codes */
  886. dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
  887. channel_id, err_ee, code);
  888. }
  889. /* Global event error interrupt handler */
  890. static void
  891. gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
  892. {
  893. if (code == GSI_OUT_OF_RESOURCES) {
  894. struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
  895. u32 channel_id = gsi_channel_id(evt_ring->channel);
  896. complete(&gsi->completion);
  897. dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
  898. channel_id);
  899. return;
  900. }
  901. /* Report, but otherwise ignore all other error codes */
  902. dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
  903. evt_ring_id, err_ee, code);
  904. }
  905. /* Global error interrupt handler */
  906. static void gsi_isr_glob_err(struct gsi *gsi)
  907. {
  908. enum gsi_err_type type;
  909. enum gsi_err_code code;
  910. u32 which;
  911. u32 val;
  912. u32 ee;
  913. /* Get the logged error, then reinitialize the log */
  914. val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
  915. iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
  916. iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
  917. ee = u32_get_bits(val, ERR_EE_FMASK);
  918. type = u32_get_bits(val, ERR_TYPE_FMASK);
  919. which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
  920. code = u32_get_bits(val, ERR_CODE_FMASK);
  921. if (type == GSI_ERR_TYPE_CHAN)
  922. gsi_isr_glob_chan_err(gsi, ee, which, code);
  923. else if (type == GSI_ERR_TYPE_EVT)
  924. gsi_isr_glob_evt_err(gsi, ee, which, code);
  925. else /* type GSI_ERR_TYPE_GLOB should be fatal */
  926. dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
  927. }
  928. /* Generic EE interrupt handler */
  929. static void gsi_isr_gp_int1(struct gsi *gsi)
  930. {
  931. u32 result;
  932. u32 val;
  933. /* This interrupt is used to handle completions of GENERIC GSI
  934. * commands. We use these to allocate and halt channels on the
  935. * modem's behalf due to a hardware quirk on IPA v4.2. The modem
  936. * "owns" channels even when the AP allocates them, and we have no
  937. * way of knowing whether a modem channel's state has been changed.
  938. *
  939. * We also use GENERIC commands to enable/disable channel flow
  940. * control for IPA v4.2+.
  941. *
  942. * It is recommended that we halt the modem channels we allocated
  943. * when shutting down, but it's possible the channel isn't running
  944. * at the time we issue the HALT command. We'll get an error in
  945. * that case, but it's harmless (the channel is already halted).
  946. * Similarly, we could get an error back when updating flow control
  947. * on a channel because it's not in the proper state.
  948. *
  949. * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
  950. * error if we receive it.
  951. */
  952. val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
  953. result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
  954. switch (result) {
  955. case GENERIC_EE_SUCCESS:
  956. case GENERIC_EE_INCORRECT_CHANNEL_STATE:
  957. gsi->result = 0;
  958. break;
  959. case GENERIC_EE_RETRY:
  960. gsi->result = -EAGAIN;
  961. break;
  962. default:
  963. dev_err(gsi->dev, "global INT1 generic result %u\n", result);
  964. gsi->result = -EIO;
  965. break;
  966. }
  967. complete(&gsi->completion);
  968. }
  969. /* Inter-EE interrupt handler */
  970. static void gsi_isr_glob_ee(struct gsi *gsi)
  971. {
  972. u32 val;
  973. val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
  974. if (val & BIT(ERROR_INT))
  975. gsi_isr_glob_err(gsi);
  976. iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
  977. val &= ~BIT(ERROR_INT);
  978. if (val & BIT(GP_INT1)) {
  979. val ^= BIT(GP_INT1);
  980. gsi_isr_gp_int1(gsi);
  981. }
  982. if (val)
  983. dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
  984. }
  985. /* I/O completion (IEOB) interrupt handler */
  986. static void gsi_isr_ieob(struct gsi *gsi)
  987. {
  988. u32 event_mask;
  989. event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
  990. gsi_irq_ieob_disable(gsi, event_mask);
  991. iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
  992. while (event_mask) {
  993. u32 evt_ring_id = __ffs(event_mask);
  994. event_mask ^= BIT(evt_ring_id);
  995. napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
  996. }
  997. }
  998. /* General event interrupts represent serious problems, so report them */
  999. static void gsi_isr_general(struct gsi *gsi)
  1000. {
  1001. struct device *dev = gsi->dev;
  1002. u32 val;
  1003. val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
  1004. iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
  1005. dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
  1006. }
  1007. /**
  1008. * gsi_isr() - Top level GSI interrupt service routine
  1009. * @irq: Interrupt number (ignored)
  1010. * @dev_id: GSI pointer supplied to request_irq()
  1011. *
  1012. * This is the main handler function registered for the GSI IRQ. Each type
  1013. * of interrupt has a separate handler function that is called from here.
  1014. */
  1015. static irqreturn_t gsi_isr(int irq, void *dev_id)
  1016. {
  1017. struct gsi *gsi = dev_id;
  1018. u32 intr_mask;
  1019. u32 cnt = 0;
  1020. /* enum gsi_irq_type_id defines GSI interrupt types */
  1021. while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
  1022. /* intr_mask contains bitmask of pending GSI interrupts */
  1023. do {
  1024. u32 gsi_intr = BIT(__ffs(intr_mask));
  1025. intr_mask ^= gsi_intr;
  1026. switch (gsi_intr) {
  1027. case BIT(GSI_CH_CTRL):
  1028. gsi_isr_chan_ctrl(gsi);
  1029. break;
  1030. case BIT(GSI_EV_CTRL):
  1031. gsi_isr_evt_ctrl(gsi);
  1032. break;
  1033. case BIT(GSI_GLOB_EE):
  1034. gsi_isr_glob_ee(gsi);
  1035. break;
  1036. case BIT(GSI_IEOB):
  1037. gsi_isr_ieob(gsi);
  1038. break;
  1039. case BIT(GSI_GENERAL):
  1040. gsi_isr_general(gsi);
  1041. break;
  1042. default:
  1043. dev_err(gsi->dev,
  1044. "unrecognized interrupt type 0x%08x\n",
  1045. gsi_intr);
  1046. break;
  1047. }
  1048. } while (intr_mask);
  1049. if (++cnt > GSI_ISR_MAX_ITER) {
  1050. dev_err(gsi->dev, "interrupt flood\n");
  1051. break;
  1052. }
  1053. }
  1054. return IRQ_HANDLED;
  1055. }
  1056. /* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
  1057. static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
  1058. {
  1059. int ret;
  1060. ret = platform_get_irq_byname(pdev, "gsi");
  1061. if (ret <= 0)
  1062. return ret ? : -EINVAL;
  1063. gsi->irq = ret;
  1064. return 0;
  1065. }
  1066. /* Return the transaction associated with a transfer completion event */
  1067. static struct gsi_trans *
  1068. gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
  1069. {
  1070. u32 channel_id = event->chid;
  1071. struct gsi_channel *channel;
  1072. struct gsi_trans *trans;
  1073. u32 tre_offset;
  1074. u32 tre_index;
  1075. channel = &gsi->channel[channel_id];
  1076. if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
  1077. return NULL;
  1078. /* Event xfer_ptr records the TRE it's associated with */
  1079. tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
  1080. tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
  1081. trans = gsi_channel_trans_mapped(channel, tre_index);
  1082. if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
  1083. return NULL;
  1084. return trans;
  1085. }
  1086. /**
  1087. * gsi_evt_ring_update() - Update transaction state from hardware
  1088. * @gsi: GSI pointer
  1089. * @evt_ring_id: Event ring ID
  1090. * @index: Event index in ring reported by hardware
  1091. *
  1092. * Events for RX channels contain the actual number of bytes received into
  1093. * the buffer. Every event has a transaction associated with it, and here
  1094. * we update transactions to record their actual received lengths.
  1095. *
  1096. * When an event for a TX channel arrives we use information in the
  1097. * transaction to report the number of requests and bytes that have
  1098. * been transferred.
  1099. *
  1100. * This function is called whenever we learn that the GSI hardware has filled
  1101. * new events since the last time we checked. The ring's index field identifies
  1102. * the first entry in need of processing. The index provided is the
  1103. * first *unfilled* event in the ring (following the last filled one).
  1104. *
  1105. * Events are sequential within the event ring, and transactions are
  1106. * sequential within the transaction array.
  1107. *
  1108. * Note that @index always refers to an element *within* the event ring.
  1109. */
static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        struct gsi_ring *ring = &evt_ring->ring;
        struct gsi_event *event_done;
        struct gsi_event *event;
        u32 event_avail;
        u32 old_index;

        /* Starting with the oldest un-processed event, determine which
         * transaction (and which channel) is associated with the event.
         * For RX channels, update each completed transaction with the
         * number of bytes that were actually received. For TX channels
         * associated with a network device, report to the network stack
         * the number of transfers and bytes this completion represents.
         */
        old_index = ring->index;
        event = gsi_ring_virt(ring, old_index);

        /* Compute the number of events to process before we wrap,
         * and determine when we'll be done processing events.
         */
        event_avail = ring->count - old_index % ring->count;
        event_done = gsi_ring_virt(ring, index);
        do {
                struct gsi_trans *trans;

                trans = gsi_event_trans(gsi, event);
                if (!trans)
                        return;

                if (trans->direction == DMA_FROM_DEVICE)
                        trans->len = __le16_to_cpu(event->len);
                else
                        gsi_trans_tx_completed(trans);

                gsi_trans_move_complete(trans);

                /* Move on to the next event and transaction */
                if (--event_avail)
                        event++;
                else
                        event = gsi_ring_virt(ring, 0);
        } while (event != event_done);

        /* Tell the hardware we've handled these events */
        gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}
/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
        u32 size = count * GSI_RING_ELEMENT_SIZE;
        struct device *dev = gsi->dev;
        dma_addr_t addr;

        /* Hardware requires a 2^n ring size, with alignment equal to size.
         * The DMA address returned by dma_alloc_coherent() is guaranteed to
         * be a power-of-2 number of pages, which satisfies the requirement.
         */
        ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
        if (!ring->virt)
                return -ENOMEM;

        ring->addr = addr;
        ring->count = count;
        ring->index = 0;

        return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
        size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

        dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}
/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
        u32 evt_ring_id;

        if (gsi->event_bitmap == ~0U) {
                dev_err(gsi->dev, "event rings exhausted\n");
                return -ENOSPC;
        }

        evt_ring_id = ffz(gsi->event_bitmap);
        gsi->event_bitmap |= BIT(evt_ring_id);

        return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
        gsi->event_bitmap &= ~BIT(evt_ring_id);
}
/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
        struct gsi_ring *tre_ring = &channel->tre_ring;
        u32 channel_id = gsi_channel_id(channel);
        struct gsi *gsi = channel->gsi;
        u32 val;

        /* Note: index *must* be used modulo the ring count here */
        val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
        iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}
/* Consult hardware, move newly completed transactions to completed state */
void gsi_channel_update(struct gsi_channel *channel)
{
        u32 evt_ring_id = channel->evt_ring_id;
        struct gsi *gsi = channel->gsi;
        struct gsi_evt_ring *evt_ring;
        struct gsi_trans *trans;
        struct gsi_ring *ring;
        u32 offset;
        u32 index;

        evt_ring = &gsi->evt_ring[evt_ring_id];
        ring = &evt_ring->ring;

        /* See if there's anything new to process; if not, we're done. Note
         * that index always refers to an entry *within* the event ring.
         */
        offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
        index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
        if (index == ring->index % ring->count)
                return;

        /* Get the transaction for the latest completed event. */
        trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
        if (!trans)
                return;

        /* For RX channels, update each completed transaction with the number
         * of bytes that were actually received. For TX channels, report
         * the number of transactions and bytes this completion represents
         * up the network stack.
         */
        gsi_evt_ring_update(gsi, evt_ring_id, index);
}
/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel: Channel to be polled
 *
 * Return: Transaction pointer, or null if none are available
 *
 * This function returns the first of a channel's completed transactions.
 * If no transactions are in completed state, the hardware is consulted to
 * determine whether any new transactions have completed. If so, they're
 * moved to completed state and the first such transaction is returned.
 * If there are no more completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
        struct gsi_trans *trans;

        /* Get the first completed transaction */
        trans = gsi_channel_trans_complete(channel);
        if (trans)
                gsi_trans_move_polled(trans);

        return trans;
}
/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:   NAPI structure for the channel
 * @budget: Budget supplied by NAPI core
 *
 * Return: Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more. Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
        struct gsi_channel *channel;
        int count;

        channel = container_of(napi, struct gsi_channel, napi);
        for (count = 0; count < budget; count++) {
                struct gsi_trans *trans;

                trans = gsi_channel_poll_one(channel);
                if (!trans)
                        break;
                gsi_trans_complete(trans);
        }
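
        /* If the budget wasn't exhausted there is nothing more to poll right
         * now; once NAPI is marked complete, re-enable the IEOB interrupt
         * for this channel's event ring so the next completion reschedules
         * polling.
         */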
        if (count < budget && napi_complete(napi))
                gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

        return count;
}
/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used. This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
        u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

        event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

        return event_bitmap;
}
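
/* Example (hypothetical numbers): with evt_ring_max == 20, the first GENMASK
 * marks every bit at or above 20 in the 32-bit map as in use, and the second
 * additionally reserves the MHI event ID range. gsi_evt_ring_id_alloc() then
 * hands out the remaining clear (low) bits via ffz().
 */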
/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        u32 evt_ring_id = channel->evt_ring_id;
        int ret;

        if (!gsi_channel_initialized(channel))
                return 0;

        ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
        if (ret)
                return ret;

        gsi_evt_ring_program(gsi, evt_ring_id);

        ret = gsi_channel_alloc_command(gsi, channel_id);
        if (ret)
                goto err_evt_ring_de_alloc;

        gsi_channel_program(channel, true);

        if (channel->toward_ipa)
                netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
                                  gsi_channel_poll);
        else
                netif_napi_add(&gsi->dummy_dev, &channel->napi,
                               gsi_channel_poll);

        return 0;

err_evt_ring_de_alloc:
        /* We've done nothing with the event ring yet so don't reset */
        gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

        return ret;
}
/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        u32 evt_ring_id = channel->evt_ring_id;

        if (!gsi_channel_initialized(channel))
                return;

        netif_napi_del(&channel->napi);

        gsi_channel_de_alloc_command(gsi, channel_id);
        gsi_evt_ring_reset_command(gsi, evt_ring_id);
        gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}
/* We use generic commands only to operate on modem channels. We don't have
 * the ability to determine channel state for a modem channel, so we simply
 * issue the command and wait for it to complete.
 */
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
                               enum gsi_generic_cmd_opcode opcode,
                               u8 params)
{
        bool timeout;
        u32 val;

        /* The error global interrupt type is always enabled (until we tear
         * down), so we will keep it enabled.
         *
         * A generic EE command completes with a GSI global interrupt of
         * type GP_INT1. We only perform one generic command at a time
         * (to allocate, halt, or enable/disable flow control on a modem
         * channel), and only from this function. So we enable the GP_INT1
         * IRQ type here, and disable it again after the command completes.
         */
        val = BIT(ERROR_INT) | BIT(GP_INT1);
        iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

        /* First zero the result code field */
        val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
        val &= ~GENERIC_EE_RESULT_FMASK;
        iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);

        /* Now issue the command */
        val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
        val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
        val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
        if (gsi->version >= IPA_VERSION_4_11)
                val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);

        timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);

        /* Disable the GP_INT1 IRQ type again */
        iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

        if (!timeout)
                return gsi->result;

        dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
                opcode, channel_id);

        return -ETIMEDOUT;
}
static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
        return gsi_generic_command(gsi, channel_id,
                                   GSI_GENERIC_ALLOCATE_CHANNEL, 0);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
        u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
        int ret;
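
        /* The halt command can transiently fail with -EAGAIN; retry it a
         * bounded number of times before reporting an error.
         */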
        do
                ret = gsi_generic_command(gsi, channel_id,
                                          GSI_GENERIC_HALT_CHANNEL, 0);
        while (ret == -EAGAIN && retries--);

        if (ret)
                dev_err(gsi->dev, "error %d halting modem channel %u\n",
                        ret, channel_id);
}
/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
void
gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
{
        u32 retries = 0;
        u32 command;
        int ret;

        command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
                         : GSI_GENERIC_DISABLE_FLOW_CONTROL;

        /* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
         * is underway. In this case we need to retry the command.
         */
        if (!enable && gsi->version >= IPA_VERSION_4_11)
                retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;

        do
                ret = gsi_generic_command(gsi, channel_id, command, 0);
        while (ret == -EAGAIN && retries--);

        if (ret)
                dev_err(gsi->dev,
                        "error %d %sabling modem channel %u flow control\n",
                        ret, enable ? "en" : "dis", channel_id);
}
/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
        u32 channel_id = 0;
        u32 mask;
        int ret;

        gsi_irq_enable(gsi);

        mutex_lock(&gsi->mutex);

        do {
                ret = gsi_channel_setup_one(gsi, channel_id);
                if (ret)
                        goto err_unwind;
        } while (++channel_id < gsi->channel_count);

        /* Make sure no channels were defined that hardware does not support */
        while (channel_id < GSI_CHANNEL_COUNT_MAX) {
                struct gsi_channel *channel = &gsi->channel[channel_id++];

                if (!gsi_channel_initialized(channel))
                        continue;

                ret = -EINVAL;
                dev_err(gsi->dev, "channel %u not supported by hardware\n",
                        channel_id - 1);
                channel_id = gsi->channel_count;
                goto err_unwind;
        }

        /* Allocate modem channels if necessary */
        mask = gsi->modem_channel_bitmap;
        while (mask) {
                u32 modem_channel_id = __ffs(mask);

                ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
                if (ret)
                        goto err_unwind_modem;

                /* Clear bit from mask only after success (for unwind) */
                mask ^= BIT(modem_channel_id);
        }

        mutex_unlock(&gsi->mutex);

        return 0;

err_unwind_modem:
        /* Compute which modem channels need to be deallocated */
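        /* Bits were cleared from the local mask only after a successful
         * allocation, so XORing it with the full modem bitmap yields
         * exactly the channels allocated so far.
         */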
        mask ^= gsi->modem_channel_bitmap;
        while (mask) {
                channel_id = __fls(mask);

                mask ^= BIT(channel_id);

                gsi_modem_channel_halt(gsi, channel_id);
        }

err_unwind:
        while (channel_id--)
                gsi_channel_teardown_one(gsi, channel_id);

        mutex_unlock(&gsi->mutex);

        gsi_irq_disable(gsi);

        return ret;
}
/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
        u32 mask = gsi->modem_channel_bitmap;
        u32 channel_id;

        mutex_lock(&gsi->mutex);

        while (mask) {
                channel_id = __fls(mask);

                mask ^= BIT(channel_id);

                gsi_modem_channel_halt(gsi, channel_id);
        }

        channel_id = gsi->channel_count - 1;
        do
                gsi_channel_teardown_one(gsi, channel_id);
        while (channel_id--);

        mutex_unlock(&gsi->mutex);

        gsi_irq_disable(gsi);
}
/* Turn off all GSI interrupts initially */
static int gsi_irq_setup(struct gsi *gsi)
{
        int ret;

        /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
        iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

        /* Disable all interrupt types */
        gsi_irq_type_update(gsi, 0);

        /* Clear all type-specific interrupt masks */
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

        /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
        if (gsi->version > IPA_VERSION_3_1) {
                u32 offset;

                /* These registers are in the non-adjusted address range */
                offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
                iowrite32(0, gsi->virt_raw + offset);
                offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
                iowrite32(0, gsi->virt_raw + offset);
        }

        iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);

        ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
        if (ret)
                dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);

        return ret;
}

static void gsi_irq_teardown(struct gsi *gsi)
{
        free_irq(gsi->irq, gsi);
}
/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
static int gsi_ring_setup(struct gsi *gsi)
{
        struct device *dev = gsi->dev;
        u32 count;
        u32 val;

        if (gsi->version < IPA_VERSION_3_5_1) {
                /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
                gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
                gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;

                return 0;
        }

        val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

        count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
        if (!count) {
                dev_err(dev, "GSI reports zero channels supported\n");
                return -EINVAL;
        }
        if (count > GSI_CHANNEL_COUNT_MAX) {
                dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
                         GSI_CHANNEL_COUNT_MAX, count);
                count = GSI_CHANNEL_COUNT_MAX;
        }
        gsi->channel_count = count;

        count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
        if (!count) {
                dev_err(dev, "GSI reports zero event rings supported\n");
                return -EINVAL;
        }
        if (count > GSI_EVT_RING_COUNT_MAX) {
                dev_warn(dev,
                         "limiting to %u event rings; hardware supports %u\n",
                         GSI_EVT_RING_COUNT_MAX, count);
                count = GSI_EVT_RING_COUNT_MAX;
        }
        gsi->evt_ring_count = count;

        return 0;
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
        u32 val;
        int ret;

        /* Here is where we first touch the GSI hardware */
        val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
        if (!(val & ENABLED_FMASK)) {
                dev_err(gsi->dev, "GSI has not been enabled\n");
                return -EIO;
        }

        ret = gsi_irq_setup(gsi);
        if (ret)
                return ret;

        ret = gsi_ring_setup(gsi);      /* No matching teardown required */
        if (ret)
                goto err_irq_teardown;

        /* Initialize the error log */
        iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

        ret = gsi_channel_setup(gsi);
        if (ret)
                goto err_irq_teardown;

        return 0;

err_irq_teardown:
        gsi_irq_teardown(gsi);

        return ret;
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
        gsi_channel_teardown(gsi);
        gsi_irq_teardown(gsi);
}
/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
        struct gsi *gsi = channel->gsi;
        struct gsi_evt_ring *evt_ring;
        int ret;

        ret = gsi_evt_ring_id_alloc(gsi);
        if (ret < 0)
                return ret;
        channel->evt_ring_id = ret;

        evt_ring = &gsi->evt_ring[channel->evt_ring_id];
        evt_ring->channel = channel;

        ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
        if (!ret)
                return 0;       /* Success! */

        dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
                ret, gsi_channel_id(channel));

        gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

        return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
        u32 evt_ring_id = channel->evt_ring_id;
        struct gsi *gsi = channel->gsi;
        struct gsi_evt_ring *evt_ring;

        evt_ring = &gsi->evt_ring[evt_ring_id];
        gsi_ring_free(gsi, &evt_ring->ring);
        gsi_evt_ring_id_free(gsi, evt_ring_id);
}
static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
                                   const struct ipa_gsi_endpoint_data *data)
{
        const struct gsi_channel_data *channel_data;
        u32 channel_id = data->channel_id;
        struct device *dev = gsi->dev;

        /* Make sure channel ids are in the range driver supports */
        if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
                dev_err(dev, "bad channel id %u; must be less than %u\n",
                        channel_id, GSI_CHANNEL_COUNT_MAX);
                return false;
        }

        if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
                dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
                return false;
        }

        if (command && !data->toward_ipa) {
                dev_err(dev, "command channel %u is not TX\n", channel_id);
                return false;
        }

        channel_data = &data->channel;

        if (!channel_data->tlv_count ||
            channel_data->tlv_count > GSI_TLV_MAX) {
                dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
                        channel_id, channel_data->tlv_count, GSI_TLV_MAX);
                return false;
        }

        if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
                dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
                        channel_id, IPA_COMMAND_TRANS_TRE_MAX,
                        channel_data->tlv_count);
                return false;
        }

        /* We have to allow at least one maximally-sized transaction to
         * be outstanding (which would use tlv_count TREs). Given how
         * gsi_channel_tre_max() is computed, tre_count has to be almost
         * twice the TLV FIFO size to satisfy this requirement.
         */
        if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
                dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
                        channel_id, channel_data->tlv_count,
                        channel_data->tre_count);
                return false;
        }

        if (!is_power_of_2(channel_data->tre_count)) {
                dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
                        channel_id, channel_data->tre_count);
                return false;
        }

        if (!is_power_of_2(channel_data->event_count)) {
                dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
                        channel_id, channel_data->event_count);
                return false;
        }

        return true;
}
/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
                                const struct ipa_gsi_endpoint_data *data,
                                bool command)
{
        struct gsi_channel *channel;
        u32 tre_count;
        int ret;

        if (!gsi_channel_data_valid(gsi, command, data))
                return -EINVAL;

        /* Worst case we need an event for every outstanding TRE */
        if (data->channel.tre_count > data->channel.event_count) {
                tre_count = data->channel.event_count;
                dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
                         data->channel_id, tre_count);
        } else {
                tre_count = data->channel.tre_count;
        }

        channel = &gsi->channel[data->channel_id];
        memset(channel, 0, sizeof(*channel));

        channel->gsi = gsi;
        channel->toward_ipa = data->toward_ipa;
        channel->command = command;
        channel->trans_tre_max = data->channel.tlv_count;
        channel->tre_count = tre_count;
        channel->event_count = data->channel.event_count;

        ret = gsi_channel_evt_ring_init(channel);
        if (ret)
                goto err_clear_gsi;

        ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
        if (ret) {
                dev_err(gsi->dev, "error %d allocating channel %u ring\n",
                        ret, data->channel_id);
                goto err_channel_evt_ring_exit;
        }

        ret = gsi_channel_trans_init(gsi, data->channel_id);
        if (ret)
                goto err_ring_free;

        if (command) {
                u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

                ret = ipa_cmd_pool_init(channel, tre_max);
        }
        if (!ret)
                return 0;       /* Success! */

        gsi_channel_trans_exit(channel);
err_ring_free:
        gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
        gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
        channel->gsi = NULL;    /* Mark it not (fully) initialized */

        return ret;
}
/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
        if (!gsi_channel_initialized(channel))
                return;

        if (channel->command)
                ipa_cmd_pool_exit(channel);
        gsi_channel_trans_exit(channel);
        gsi_ring_free(channel->gsi, &channel->tre_ring);
        gsi_channel_evt_ring_exit(channel);
}
/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
                            const struct ipa_gsi_endpoint_data *data)
{
        bool modem_alloc;
        int ret = 0;
        u32 i;

        /* IPA v4.2 requires the AP to allocate channels for the modem */
        modem_alloc = gsi->version == IPA_VERSION_4_2;

        gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
        gsi->ieob_enabled_bitmap = 0;

        /* The endpoint data array is indexed by endpoint name */
        for (i = 0; i < count; i++) {
                bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

                if (ipa_gsi_endpoint_data_empty(&data[i]))
                        continue;       /* Skip over empty slots */

                /* Mark modem channels to be allocated (hardware workaround) */
                if (data[i].ee_id == GSI_EE_MODEM) {
                        if (modem_alloc)
                                gsi->modem_channel_bitmap |=
                                                BIT(data[i].channel_id);
                        continue;
                }

                ret = gsi_channel_init_one(gsi, &data[i], command);
                if (ret)
                        goto err_unwind;
        }

        return ret;

err_unwind:
        while (i--) {
                if (ipa_gsi_endpoint_data_empty(&data[i]))
                        continue;
                if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
                        gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
                        continue;
                }
                gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
        }

        return ret;
}
/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
        u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

        do
                gsi_channel_exit_one(&gsi->channel[channel_id]);
        while (channel_id--);
        gsi->modem_channel_bitmap = 0;
}
/* Init function for GSI. GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
             enum ipa_version version, u32 count,
             const struct ipa_gsi_endpoint_data *data)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        resource_size_t size;
        u32 adjust;
        int ret;

        gsi_validate_build();

        gsi->dev = dev;
        gsi->version = version;

        /* GSI uses NAPI on all channels. Create a dummy network device
         * for the channel NAPI contexts to be associated with.
         */
        init_dummy_netdev(&gsi->dummy_dev);

        /* Get GSI memory range and map it */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
        if (!res) {
                dev_err(dev, "DT error getting \"gsi\" memory property\n");
                return -ENODEV;
        }

        size = resource_size(res);
        if (res->start > U32_MAX || size > U32_MAX - res->start) {
                dev_err(dev, "DT memory resource \"gsi\" out of range\n");
                return -EINVAL;
        }

        /* Make sure we can make our pointer adjustment if necessary */
        adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
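
        /* On IPA v4.5+ the register offsets used with gsi->virt are defined
         * relative to a range that begins GSI_EE_REG_ADJUST bytes below the
         * start of the "gsi" memory resource, which is why the mapped base
         * is adjusted downward below (see the register offset definitions).
         */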
        if (res->start < adjust) {
                dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
                        adjust);
                return -EINVAL;
        }

        gsi->virt_raw = ioremap(res->start, size);
        if (!gsi->virt_raw) {
                dev_err(dev, "unable to remap \"gsi\" memory\n");
                return -ENOMEM;
        }
        /* Most registers are accessed using an adjusted register range */
        gsi->virt = gsi->virt_raw - adjust;

        init_completion(&gsi->completion);

        ret = gsi_irq_init(gsi, pdev);  /* No matching exit required */
        if (ret)
                goto err_iounmap;

        ret = gsi_channel_init(gsi, count, data);
        if (ret)
                goto err_iounmap;

        mutex_init(&gsi->mutex);

        return 0;

err_iounmap:
        iounmap(gsi->virt_raw);

        return ret;
}
/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
        mutex_destroy(&gsi->mutex);
        gsi_channel_exit(gsi);
        iounmap(gsi->virt_raw);
}
/* The maximum number of outstanding TREs on a channel. This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them. But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information. So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though. Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous. The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it. Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements. The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];

        /* Hardware limit is channel->tre_count - 1 */
        return channel->tre_count - (channel->trans_tre_max - 1);
}
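
/* Worked example (hypothetical sizes): a channel with a 256-entry TRE ring
 * and trans_tre_max (TLV count) of 16 yields 256 - 15 = 241 outstanding
 * TREs. The transaction pool then adds back trans_tre_max - 1 spare entries,
 * for 256 total, staying at the power-of-2 boundary rather than just past it.
 */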