/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef __ADRENO_H
#define __ADRENO_H

#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/soc/qcom/llcc-qcom.h>

#include "adreno_coresight.h"
#include "adreno_dispatch.h"
#include "adreno_drawctxt.h"
#include "adreno_hfi.h"
#include "adreno_hwsched.h"
#include "adreno_perfcounter.h"
#include "adreno_profile.h"
#include "adreno_ringbuffer.h"
#include "kgsl_sharedmem.h"

/* Used to point CP to the SMMU record during preemption */
#define SET_PSEUDO_SMMU_INFO 0
/* Used to inform CP where to save preemption data at the time of switch out */
#define SET_PSEUDO_PRIV_NON_SECURE_SAVE_ADDR 1
/* Used to inform CP where to save secure preemption data at the time of switch out */
#define SET_PSEUDO_PRIV_SECURE_SAVE_ADDR 2
/* Used to inform CP where to save per context non-secure data at the time of switch out */
#define SET_PSEUDO_NON_PRIV_SAVE_ADDR 3
/* Used to inform CP where to save preemption counter data at the time of switch out */
#define SET_PSEUDO_COUNTER 4

/* Index into the preemption scratch buffer to store the current QOS value */
#define QOS_VALUE_IDX KGSL_PRIORITY_MAX_RB_LEVELS

/* ADRENO_DEVICE - Given a kgsl_device return the adreno device struct */
#define ADRENO_DEVICE(device) \
		container_of(device, struct adreno_device, dev)

/* KGSL_DEVICE - given an adreno_device, return the KGSL device struct */
#define KGSL_DEVICE(_dev) (&((_dev)->dev))

/* ADRENO_CONTEXT - Given a context return the adreno context struct */
#define ADRENO_CONTEXT(context) \
		container_of(context, struct adreno_context, base)

/* ADRENO_GPU_DEVICE - Given an adreno device return the GPU specific struct */
#define ADRENO_GPU_DEVICE(_a) ((_a)->gpucore->gpudev)
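/*
 * Illustrative round trip (sketch; "device" and "adreno_dev" are hypothetical
 * locals): since struct adreno_device embeds its kgsl_device as the first
 * field, ADRENO_DEVICE() and KGSL_DEVICE() are inverses of each other:
 *
 *	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 *	struct kgsl_device *same = KGSL_DEVICE(adreno_dev);
 */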
/*
 * ADRENO_POWER_OPS - Given an adreno device return the GPU specific power
 * ops
 */
#define ADRENO_POWER_OPS(_a) ((_a)->gpucore->gpudev->power_ops)

#define ADRENO_CHIPID_CORE(_id) FIELD_GET(GENMASK(31, 24), _id)
#define ADRENO_CHIPID_MAJOR(_id) FIELD_GET(GENMASK(23, 16), _id)
#define ADRENO_CHIPID_MINOR(_id) FIELD_GET(GENMASK(15, 8), _id)
#define ADRENO_CHIPID_PATCH(_id) FIELD_GET(GENMASK(7, 0), _id)

#define ADRENO_GMU_CHIPID(_id) \
	(FIELD_PREP(GENMASK(31, 24), ADRENO_CHIPID_CORE(_id)) | \
	 FIELD_PREP(GENMASK(23, 16), ADRENO_CHIPID_MAJOR(_id)) | \
	 FIELD_PREP(GENMASK(15, 12), ADRENO_CHIPID_MINOR(_id)) | \
	 FIELD_PREP(GENMASK(11, 8), ADRENO_CHIPID_PATCH(_id)))

#define ADRENO_REV_MAJOR(_rev) FIELD_GET(GENMASK(23, 16), _rev)
#define ADRENO_REV_MINOR(_rev) FIELD_GET(GENMASK(15, 8), _rev)
#define ADRENO_REV_PATCH(_rev) FIELD_GET(GENMASK(7, 0), _rev)

#define ADRENO_GMU_REV(_rev) \
	(FIELD_PREP(GENMASK(31, 24), ADRENO_REV_MAJOR(_rev)) | \
	 FIELD_PREP(GENMASK(23, 16), ADRENO_REV_MINOR(_rev)) | \
	 FIELD_PREP(GENMASK(15, 8), ADRENO_REV_PATCH(_rev)))
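/*
 * Worked example (value illustrative only): for a chipid of 0x07020100,
 * ADRENO_CHIPID_CORE() == 0x07, MAJOR == 0x02, MINOR == 0x01 and
 * PATCH == 0x00. ADRENO_GMU_CHIPID() then repacks the minor and patch
 * fields into nibbles, so 0x07020100 becomes 0x07021000.
 */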
/* ADRENO_GPUREV - Return the GPU ID for the given adreno_device */
#define ADRENO_GPUREV(_a) ((_a)->gpucore->gpurev)

/*
 * ADRENO_FEATURE - return true if the specified feature is supported by the
 * GPU core
 */
#define ADRENO_FEATURE(_dev, _bit) \
	((_dev)->gpucore->features & (_bit))

/**
 * ADRENO_QUIRK - return true if the specified quirk is required by the GPU
 */
#define ADRENO_QUIRK(_dev, _bit) \
	((_dev)->quirks & (_bit))

#define ADRENO_FW(a, f) (&((a)->fw[f]))
/* Adreno core features */
/* The core supports SP/TP hw controlled power collapse */
#define ADRENO_SPTP_PC BIT(0)
/* The GPU supports content protection */
#define ADRENO_CONTENT_PROTECTION BIT(1)
/* The GPU supports preemption */
#define ADRENO_PREEMPTION BIT(2)
/* The GPMU supports Limits Management */
#define ADRENO_LM BIT(3)
/* The GPU supports retention for cpz registers */
#define ADRENO_CPZ_RETENTION BIT(4)
/* The core has soft fault detection available */
#define ADRENO_SOFT_FAULT_DETECT BIT(5)
/* The GMU supports IFPC power management */
#define ADRENO_IFPC BIT(6)
/* The core supports IO-coherent memory */
#define ADRENO_IOCOHERENT BIT(7)
/*
 * The GMU supports Adaptive Clock Distribution (ACD)
 * for droop mitigation
 */
#define ADRENO_ACD BIT(8)
/* Cooperative reset enabled GMU */
#define ADRENO_COOP_RESET BIT(9)
/* Indicates that the specific target is no longer supported */
#define ADRENO_DEPRECATED BIT(10)
/* The target supports ringbuffer level APRIV */
#define ADRENO_APRIV BIT(11)
/* The GMU supports Battery Current Limiting */
#define ADRENO_BCL BIT(12)
/* L3 voting is supported with L3 constraints */
#define ADRENO_L3_VOTE BIT(13)
/* LPAC is supported */
#define ADRENO_LPAC BIT(14)
/* Late Stage Reprojection (LSR) enablement for GMU */
#define ADRENO_LSR BIT(15)
/* GMU and kernel support hardware fences */
#define ADRENO_HW_FENCE BIT(16)
/* Dynamic Mode Switching supported on this target */
#define ADRENO_DMS BIT(17)
/* AQE supported on this target */
#define ADRENO_AQE BIT(18)
/* Warm Boot supported on this target */
#define ADRENO_GMU_WARMBOOT BIT(19)
/* The GPU supports CLX */
#define ADRENO_CLX BIT(20)
/*
 * Adreno GPU quirks - control bits for various workarounds
 */
/* Set TWOPASSUSEWFI in PC_DBG_ECO_CNTL (5XX/6XX) */
#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
/* Submit critical packets at GPU wake up */
#define ADRENO_QUIRK_CRITICAL_PACKETS BIT(1)
/* Mask out RB1-3 activity signals from HW hang detection logic */
#define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(2)
/* Disable RB sampler datapath clock gating optimization */
#define ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING BIT(3)
/* Disable local memory (LM) feature to avoid corner case error */
#define ADRENO_QUIRK_DISABLE_LMLOADKILL BIT(4)
/* Allow HFI to use registers to send message to GMU */
#define ADRENO_QUIRK_HFI_USE_REG BIT(5)
/* Only set protected SECVID registers once */
#define ADRENO_QUIRK_SECVID_SET_ONCE BIT(6)
/*
 * Limit number of read and write transactions from
 * UCHE block to GBIF to avoid possible deadlock
 * between GBIF, SMMU and MEMNOC.
 */
#define ADRENO_QUIRK_LIMIT_UCHE_GBIF_RW BIT(8)
/* Do explicit mode control of cx gdsc */
#define ADRENO_QUIRK_CX_GDSC BIT(9)
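/*
 * Typical tests (sketch; the call sites are hypothetical): both
 * ADRENO_FEATURE() and ADRENO_QUIRK() evaluate to a nonzero value when the
 * bit is set, so they read naturally in conditionals:
 *
 *	if (ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION))
 *		... set up preemption ...
 *	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CX_GDSC))
 *		... apply the workaround ...
 */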
/* Command identifiers */
#define CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
#define CMD_IDENTIFIER 0x2EEDFACE
#define CMD_INTERNAL_IDENTIFIER 0x2EEDD00D
#define START_IB_IDENTIFIER 0x2EADEABE
#define END_IB_IDENTIFIER 0x2ABEDEAD
#define START_PROFILE_IDENTIFIER 0x2DEFADE1
#define END_PROFILE_IDENTIFIER 0x2DEFADE2
#define PWRON_FIXUP_IDENTIFIER 0x2AFAFAFA

/*
 * One cannot wait forever for the core to idle, so set an upper limit to the
 * amount of time to wait for the core to go idle
 */
#define ADRENO_IDLE_TIMEOUT (20 * 1000)

#define ADRENO_FW_PFP 0
#define ADRENO_FW_SQE 0
#define ADRENO_FW_PM4 1
#define ADRENO_FW_AQE 1

#define ADRENO_GPUREV_VALUE(_major, _minor, _patchid) \
	((((_major) & 0xFF) << 16) | \
	 (((_minor) & 0xFF) << 8) | \
	 ((_patchid) & 0xFF))
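/*
 * Example packing (illustrative): ADRENO_GPUREV_VALUE(7, 2, 1) evaluates to
 * 0x070201, which is how the GEN7/GEN8 entries below get their values.
 */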
enum adreno_gpurev {
	ADRENO_REV_UNKNOWN = 0,
	ADRENO_REV_A304 = 304,
	ADRENO_REV_A305 = 305,
	ADRENO_REV_A305C = 306,
	ADRENO_REV_A306 = 307,
	ADRENO_REV_A306A = 308,
	ADRENO_REV_A310 = 310,
	ADRENO_REV_A320 = 320,
	ADRENO_REV_A330 = 330,
	ADRENO_REV_A305B = 335,
	ADRENO_REV_A405 = 405,
	ADRENO_REV_A418 = 418,
	ADRENO_REV_A420 = 420,
	ADRENO_REV_A430 = 430,
	ADRENO_REV_A505 = 505,
	ADRENO_REV_A506 = 506,
	ADRENO_REV_A508 = 508,
	ADRENO_REV_A510 = 510,
	ADRENO_REV_A512 = 512,
	ADRENO_REV_A530 = 530,
	ADRENO_REV_A540 = 540,
	ADRENO_REV_A610 = 610,
	ADRENO_REV_A611 = 611,
	ADRENO_REV_A612 = 612,
	ADRENO_REV_A615 = 615,
	ADRENO_REV_A616 = 616,
	ADRENO_REV_A618 = 618,
	ADRENO_REV_A619 = 619,
	ADRENO_REV_A620 = 620,
	ADRENO_REV_A621 = 621,
	ADRENO_REV_A630 = 630,
	ADRENO_REV_A635 = 635,
	ADRENO_REV_A640 = 640,
	ADRENO_REV_A650 = 650,
	ADRENO_REV_A660 = 660,
	ADRENO_REV_A662 = 662,
	ADRENO_REV_A663 = 663,
	ADRENO_REV_A680 = 680,
	ADRENO_REV_A702 = 702,
	/*
	 * Gen7 and higher version numbers may exceed 1 digit
	 * Bits 16-23: Major
	 * Bits 8-15: Minor
	 * Bits 0-7: Patch id
	 */
	ADRENO_REV_GEN7_0_0 = ADRENO_GPUREV_VALUE(7, 0, 0),
	ADRENO_REV_GEN7_0_1 = ADRENO_GPUREV_VALUE(7, 0, 1),
	ADRENO_REV_GEN7_2_0 = ADRENO_GPUREV_VALUE(7, 2, 0),
	ADRENO_REV_GEN7_2_1 = ADRENO_GPUREV_VALUE(7, 2, 1),
	ADRENO_REV_GEN7_4_0 = ADRENO_GPUREV_VALUE(7, 4, 0),
	ADRENO_REV_GEN7_9_0 = ADRENO_GPUREV_VALUE(7, 9, 0),
	ADRENO_REV_GEN7_9_1 = ADRENO_GPUREV_VALUE(7, 9, 1),
	ADRENO_REV_GEN7_11_0 = ADRENO_GPUREV_VALUE(7, 11, 0),
	ADRENO_REV_GEN8_3_0 = ADRENO_GPUREV_VALUE(8, 3, 0),
};
#define ADRENO_SOFT_FAULT BIT(0)
#define ADRENO_HARD_FAULT BIT(1)
#define ADRENO_TIMEOUT_FAULT BIT(2)
#define ADRENO_IOMMU_PAGE_FAULT BIT(3)
#define ADRENO_PREEMPT_FAULT BIT(4)
#define ADRENO_GMU_FAULT BIT(5)
#define ADRENO_CTX_DETATCH_TIMEOUT_FAULT BIT(6)
#define ADRENO_GMU_FAULT_SKIP_SNAPSHOT BIT(7)

enum adreno_pipe_type {
	PIPE_NONE = 0,
	PIPE_BR = 1,
	PIPE_BV = 2,
	PIPE_LPAC = 3,
	PIPE_AQE0 = 4,
	PIPE_AQE1 = 5,
	PIPE_DDE_BR = 6,
	PIPE_DDE_BV = 7,
};
/**
 * Bit fields for GPU_CX_MISC_CX_AHB_*_CNTL registers
 * AHB_TXFRTIMEOUTRELEASE [8:8]
 * AHB_TXFRTIMEOUTENABLE [9:9]
 * AHB_RESPONDERROR [11:11]
 * AHB_ERRORSTATUSENABLE [12:12]
 */
#define ADRENO_AHB_CNTL_DEFAULT (BIT(12) | BIT(11) | BIT(9) | BIT(8))
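/*
 * With the bit layout above, ADRENO_AHB_CNTL_DEFAULT evaluates to 0x1b00:
 * transfer timeout release/enable plus respond-error and error-status
 * enable, with the remaining control bits left at their reset values.
 */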
/* number of throttle counters for DCVS adjustment */
#define ADRENO_GPMU_THROTTLE_COUNTERS 4

struct adreno_gpudev;

/* Time to allow preemption to complete (in ms) */
#define ADRENO_PREEMPT_TIMEOUT 10000

#define PREEMPT_SCRATCH_OFFSET(id) ((id) * sizeof(u64))
#define PREEMPT_SCRATCH_ADDR(dev, id) \
	((dev)->preempt.scratch->gpuaddr + PREEMPT_SCRATCH_OFFSET(id))
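/*
 * Illustrative use (sketch; "rb" is a hypothetical ringbuffer pointer): each
 * ringbuffer level owns one u64 slot in the preemption scratch buffer, and
 * QOS_VALUE_IDX above indexes the slot just past the per-ringbuffer ones.
 *
 *	u64 gpuaddr = PREEMPT_SCRATCH_ADDR(adreno_dev, rb->id);
 */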
/**
 * enum adreno_preempt_states
 * ADRENO_PREEMPT_NONE: No preemption is scheduled
 * ADRENO_PREEMPT_START: The S/W has started
 * ADRENO_PREEMPT_TRIGGERED: A preempt has been triggered in the HW
 * ADRENO_PREEMPT_FAULTED: The preempt timer has fired
 * ADRENO_PREEMPT_PENDING: The H/W has signaled preemption complete
 * ADRENO_PREEMPT_COMPLETE: Preemption could not be finished in the IRQ handler,
 * worker has been scheduled
 */
enum adreno_preempt_states {
	ADRENO_PREEMPT_NONE = 0,
	ADRENO_PREEMPT_START,
	ADRENO_PREEMPT_TRIGGERED,
	ADRENO_PREEMPT_FAULTED,
	ADRENO_PREEMPT_PENDING,
	ADRENO_PREEMPT_COMPLETE,
};
/**
 * struct adreno_protected_regs - container for a protect register span
 */
struct adreno_protected_regs {
	/** @reg: Physical protected mode register to write to */
	u32 reg;
	/** @start: Dword offset of the starting register in the range */
	u32 start;
	/**
	 * @end: Dword offset of the ending register in the range
	 * (inclusive)
	 */
	u32 end;
	/**
	 * @noaccess: 1 if the register should not be accessible from
	 * userspace, 0 if it can be read (but not written)
	 */
	u32 noaccess;
};
/**
 * struct adreno_preemption
 * @state: The current state of preemption
 * @scratch: Per-target scratch memory for implementation specific functionality
 * @timer: A timer to make sure preemption doesn't stall
 * @work: A work struct for the preemption worker (for 5XX)
 * @preempt_level: The level of preemption (for 6XX)
 * @skipsaverestore: To skip saverestore during L1 preemption (for 6XX)
 * @usesgmem: Enable GMEM save/restore across preemption (for 6XX)
 * @count: Track the number of preemptions triggered
 */
struct adreno_preemption {
	atomic_t state;
	struct kgsl_memdesc *scratch;
	struct timer_list timer;
	struct work_struct work;
	unsigned int preempt_level;
	bool skipsaverestore;
	bool usesgmem;
	unsigned int count;
	/** @postamble_len: Number of dwords in KMD postamble pm4 packet */
	u32 postamble_len;
	/*
	 * @postamble_bootup_len: Number of dwords in the KMD postamble pm4
	 * packet that needs to be sent before the first submission to the GPU.
	 * Note: Postambles are not preserved across slumber.
	 */
	u32 postamble_bootup_len;
};
struct adreno_busy_data {
	unsigned int gpu_busy;
	unsigned int bif_ram_cycles;
	unsigned int bif_ram_cycles_read_ch1;
	unsigned int bif_ram_cycles_write_ch0;
	unsigned int bif_ram_cycles_write_ch1;
	unsigned int bif_starved_ram;
	unsigned int bif_starved_ram_ch1;
	unsigned int num_ifpc;
	unsigned int throttle_cycles[ADRENO_GPMU_THROTTLE_COUNTERS];
	u32 bcl_throttle;
};
/**
 * struct adreno_firmware - Struct holding fw details
 * @fwvirt: Buffer which holds the ucode
 * @size: Size of ucode buffer
 * @version: Version of ucode
 * @memdesc: Memory descriptor which holds ucode buffer info
 */
struct adreno_firmware {
	unsigned int *fwvirt;
	size_t size;
	unsigned int version;
	struct kgsl_memdesc *memdesc;
};

/**
 * struct adreno_perfcounter_list_node - struct to store perfcounters
 * allocated by a process on a kgsl fd.
 * @groupid: groupid of the allocated perfcounter
 * @countable: countable assigned to the allocated perfcounter
 * @node: list node for perfcounter_list of a process
 */
struct adreno_perfcounter_list_node {
	unsigned int groupid;
	unsigned int countable;
	struct list_head node;
};

/**
 * struct adreno_device_private - Adreno private structure per fd
 * @dev_priv: the kgsl device private structure
 * @perfcounter_list: list of perfcounters used by the process
 */
struct adreno_device_private {
	struct kgsl_device_private dev_priv;
	struct list_head perfcounter_list;
};
/**
 * struct adreno_reglist_list - A container for a list of registers and
 * the number of registers in the list
 */
struct adreno_reglist_list {
	/** @regs: List of registers */
	const u32 *regs;
	/** @count: Number of registers in the list */
	u32 count;
};
/**
 * struct adreno_power_ops - Container for target specific power up/down
 * sequences
 */
struct adreno_power_ops {
	/**
	 * @first_open: Target specific function triggered when first kgsl
	 * instance is opened
	 */
	int (*first_open)(struct adreno_device *adreno_dev);
	/**
	 * @last_close: Target specific function triggered when last kgsl
	 * instance is closed
	 */
	int (*last_close)(struct adreno_device *adreno_dev);
	/**
	 * @active_count_get: Target specific function to keep gpu from power
	 * collapsing
	 */
	int (*active_count_get)(struct adreno_device *adreno_dev);
	/**
	 * @active_count_put: Target specific function to allow gpu to power
	 * collapse
	 */
	void (*active_count_put)(struct adreno_device *adreno_dev);
	/** @pm_suspend: Target specific function to suspend the driver */
	int (*pm_suspend)(struct adreno_device *adreno_dev);
	/** @pm_resume: Target specific function to resume the driver */
	void (*pm_resume)(struct adreno_device *adreno_dev);
	/**
	 * @touch_wakeup: Target specific function to start gpu on touch event
	 */
	void (*touch_wakeup)(struct adreno_device *adreno_dev);
	/** @gpu_clock_set: Target specific function to set gpu frequency */
	int (*gpu_clock_set)(struct adreno_device *adreno_dev, u32 pwrlevel);
	/** @gpu_bus_set: Target specific function to set gpu bandwidth */
	int (*gpu_bus_set)(struct adreno_device *adreno_dev, int bus_level,
		u32 ab);
};
/**
 * struct adreno_gpu_core - A specific GPU core definition
 * @gpurev: Unique GPU revision identifier
 * @core: Match for the core version of the GPU
 * @major: Match for the major version of the GPU
 * @minor: Match for the minor version of the GPU
 * @patchid: Match for the patch revision of the GPU
 * @features: Common adreno features supported by this core
 * @gpudev: Pointer to the GPU family specific functions for this core
 * @uche_gmem_alignment: Alignment required for UCHE GMEM base
 * @gmem_size: Amount of binning memory (GMEM/OCMEM) to reserve for the core
 * @bus_width: Bytes transferred in 1 cycle
 */
struct adreno_gpu_core {
	enum adreno_gpurev gpurev;
	unsigned int core, major, minor, patchid;
	/**
	 * @compatible: If specified, use the compatible string to match the
	 * device
	 */
	const char *compatible;
	unsigned long features;
	const struct adreno_gpudev *gpudev;
	const struct adreno_perfcounters *perfcounters;
	u32 uche_gmem_alignment;
	size_t gmem_size;
	u32 bus_width;
	/** @snapshot_size: Size of the static snapshot region in bytes */
	u32 snapshot_size;
	/** @num_ddr_channels: Number of DDR channels */
	u32 num_ddr_channels;
};
/**
 * struct adreno_dispatch_ops - Common functions for dispatcher operations
 */
struct adreno_dispatch_ops {
	/* @close: Shut down the dispatcher */
	void (*close)(struct adreno_device *adreno_dev);
	/* @queue_cmds: Queue a command on the context */
	int (*queue_cmds)(struct kgsl_device_private *dev_priv,
		struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
		u32 count, u32 *timestamp);
	/* @queue_context: Queue a context to be dispatched */
	void (*queue_context)(struct adreno_device *adreno_dev,
		struct adreno_context *drawctxt);
	void (*setup_context)(struct adreno_device *adreno_dev,
		struct adreno_context *drawctxt);
	void (*fault)(struct adreno_device *adreno_dev, u32 fault);
	/* @create_hw_fence: Create a hardware fence */
	void (*create_hw_fence)(struct adreno_device *adreno_dev, struct kgsl_sync_fence *kfence);
	/* @get_fault: Get the GPU fault status */
	u32 (*get_fault)(struct adreno_device *adreno_dev);
};
/**
 * struct adreno_device - The mothership structure for all adreno related info
 * @dev: Reference to struct kgsl_device
 * @priv: Holds the private flags specific to the adreno_device
 * @chipid: Chip ID specific to the GPU
 * @cx_misc_len: Length of the CX MISC register block
 * @cx_misc_virt: Pointer where the CX MISC block is mapped
 * @isense_base: Base physical address of isense block
 * @isense_len: Length of the isense register block
 * @isense_virt: Pointer where isense block is mapped
 * @gpucore: Pointer to the adreno_gpu_core structure
 * @gpmu_cmds_size: Length of gpmu cmd stream
 * @gpmu_cmds: gpmu cmd stream
 * @ringbuffers: Array of pointers to adreno_ringbuffers
 * @num_ringbuffers: Number of ringbuffers for the GPU
 * @cur_rb: Pointer to the current ringbuffer
 * @next_rb: Ringbuffer we are switching to during preemption
 * @prev_rb: Ringbuffer we are switching from during preemption
 * @fast_hang_detect: Software fault detection availability
 * @ft_policy: Defines the fault tolerance policy
 * @long_ib_detect: Long IB detection availability
 * @cooperative_reset: Indicates if graceful death handshake is enabled
 * between GMU and GPU
 * @profile: Container for adreno profiler information
 * @dispatcher: Container for adreno GPU dispatcher
 * @pwron_fixup: Command buffer to run a post-power collapse shader workaround
 * @pwron_fixup_dwords: Number of dwords in the command buffer
 * @input_work: Work struct for turning on the GPU after a touch event
 * @busy_data: Struct holding GPU VBIF busy stats
 * @ram_cycles_lo: Number of DDR clock cycles for the monitor session (Only
 * DDR channel 0 read cycles in case of GBIF)
 * @ram_cycles_lo_ch1_read: Number of DDR channel 1 Read clock cycles for
 * the monitor session
 * @ram_cycles_lo_ch0_write: Number of DDR channel 0 Write clock cycles for
 * the monitor session
 * @ram_cycles_lo_ch1_write: Number of DDR channel 1 Write clock cycles for
 * the monitor session
 * @starved_ram_lo: Number of cycles VBIF/GBIF is stalled by DDR (Only channel 0
 * stall cycles in case of GBIF)
 * @starved_ram_lo_ch1: Number of cycles GBIF is stalled by DDR channel 1
 * @halt: Atomic variable to check whether the GPU is currently halted
 * @pending_irq_refcnt: Atomic variable to keep track of running IRQ handlers
 * @ctx_d_debugfs: Context debugfs node
 * @profile_buffer: Memdesc holding the drawobj profiling buffer
 * @profile_index: Index to store the start/stop ticks in the profiling
 * buffer
 * @pwrup_reglist: Memdesc holding the power up register list
 * which is used by CP during preemption and IFPC
 * @lm_sequence: Pointer to the start of the register write sequence for LM
 * @lm_size: The dword size of the LM sequence
 * @lm_limit: limiting value for LM
 * @lm_threshold_count: register value for counter for lm threshold breakin
 * @lm_threshold_cross: number of current peaks exceeding threshold
 * @ifpc_count: Number of times the GPU went into IFPC
 * @highest_bank_bit: Value of the highest bank bit
 * @gpmu_throttle_counters: counters for number of throttled clocks
 * @irq_storm_work: Worker to handle possible interrupt storms
 * @active_list: List to track active contexts
 * @active_list_lock: Lock to protect active_list
 * @gpu_llc_slice: GPU system cache slice descriptor
 * @gpu_llc_slice_enable: To enable the GPU system cache slice or not
 * @gpuhtw_llc_slice: GPU pagetables system cache slice descriptor
 * @gpuhtw_llc_slice_enable: To enable the GPUHTW system cache slice or not
 * @zap_loaded: Used to track if zap was successfully loaded or not
 */
struct adreno_device {
	struct kgsl_device dev; /* Must be first field in this struct */
	unsigned long priv;
	unsigned int chipid;
	/** @uche_gmem_base: Base address of GMEM for UCHE access */
	u64 uche_gmem_base;
	unsigned int cx_misc_len;
	void __iomem *cx_misc_virt;
	unsigned long isense_base;
	unsigned int isense_len;
	void __iomem *isense_virt;
	const struct adreno_gpu_core *gpucore;
	struct adreno_firmware fw[2];
	size_t gpmu_cmds_size;
	unsigned int *gpmu_cmds;
	struct adreno_ringbuffer ringbuffers[KGSL_PRIORITY_MAX_RB_LEVELS];
	int num_ringbuffers;
	struct adreno_ringbuffer *cur_rb;
	struct adreno_ringbuffer *next_rb;
	struct adreno_ringbuffer *prev_rb;
	unsigned int fast_hang_detect;
	unsigned long ft_policy;
	bool long_ib_detect;
	bool cooperative_reset;
	struct adreno_profile profile;
	struct adreno_dispatcher dispatcher;
	struct kgsl_memdesc *pwron_fixup;
	unsigned int pwron_fixup_dwords;
	struct work_struct input_work;
	struct adreno_busy_data busy_data;
	unsigned int ram_cycles_lo;
	unsigned int ram_cycles_lo_ch1_read;
	unsigned int ram_cycles_lo_ch0_write;
	unsigned int ram_cycles_lo_ch1_write;
	unsigned int starved_ram_lo;
	unsigned int starved_ram_lo_ch1;
	atomic_t halt;
	atomic_t pending_irq_refcnt;
	struct dentry *ctx_d_debugfs;
	/** @lm_enabled: True if limits management is enabled for this target */
	bool lm_enabled;
	/** @acd_enabled: True if acd is enabled for this target */
	bool acd_enabled;
	/** @hwcg_enabled: True if hardware clock gating is enabled */
	bool hwcg_enabled;
	/** @throttling_enabled: True if LM throttling is enabled on a5xx */
	bool throttling_enabled;
	/** @sptp_pc_enabled: True if SPTP power collapse is enabled on a5xx */
	bool sptp_pc_enabled;
	/** @bcl_enabled: True if BCL is enabled */
	bool bcl_enabled;
	/** @clx_enabled: True if CLX is enabled */
	bool clx_enabled;
	/** @lpac_enabled: True if LPAC is enabled */
	bool lpac_enabled;
	/** @dms_enabled: True if DMS is enabled */
	bool dms_enabled;
	/** @warmboot_enabled: True if warmboot is enabled */
	bool warmboot_enabled;
	/** @preempt_override: True if command line param enables preemption */
	bool preempt_override;
	struct kgsl_memdesc *profile_buffer;
	unsigned int profile_index;
	struct kgsl_memdesc *pwrup_reglist;
	uint32_t *lm_sequence;
	uint32_t lm_size;
	struct adreno_preemption preempt;
	struct work_struct gpmu_work;
	uint32_t lm_leakage;
	uint32_t lm_limit;
	uint32_t lm_threshold_count;
	uint32_t lm_threshold_cross;
	uint32_t ifpc_count;
	unsigned int highest_bank_bit;
	unsigned int quirks;
#ifdef CONFIG_QCOM_KGSL_CORESIGHT
	/** @gx_coresight: A coresight instance for GX */
	struct adreno_coresight_device gx_coresight;
	/** @cx_coresight: A coresight instance for CX */
	struct adreno_coresight_device cx_coresight;
	/** @funnel_gfx: A coresight instance for the gfx funnel */
	struct adreno_funnel_device funnel_gfx;
#endif
	uint32_t gpmu_throttle_counters[ADRENO_GPMU_THROTTLE_COUNTERS];
	struct work_struct irq_storm_work;
	struct list_head active_list;
	spinlock_t active_list_lock;
	void *gpu_llc_slice;
	bool gpu_llc_slice_enable;
	void *gpuhtw_llc_slice;
	bool gpuhtw_llc_slice_enable;
	unsigned int zap_loaded;
	/**
	 * @critpkts: Memory descriptor for 5xx critical packets if applicable
	 */
	struct kgsl_memdesc *critpkts;
	/**
	 * @critpkts_secure: Memory descriptor for 5xx secure critical packets
	 */
	struct kgsl_memdesc *critpkts_secure;
	/** @irq_mask: The current interrupt mask for the GPU device */
	u32 irq_mask;
	/*
	 * @soft_ft_regs: an array of registers for soft fault detection on
	 * a3xx targets
	 */
	u32 *soft_ft_regs;
	/*
	 * @soft_ft_vals: an array of register values for soft fault detection
	 * on a3xx targets
	 */
	u32 *soft_ft_vals;
	/*
	 * @soft_ft_count: number of elements in @soft_ft_regs and @soft_ft_vals
	 */
	int soft_ft_count;
	/* @dispatch_ops: A pointer to a set of adreno dispatch ops */
	const struct adreno_dispatch_ops *dispatch_ops;
	/** @hwsched: Container for the hardware dispatcher */
	struct adreno_hwsched hwsched;
	/*
	 * @perfcounter: Flag to clear perfcounters across contexts and
	 * controls perfcounter ioctl read
	 */
	bool perfcounter;
	/** @gmu_hub_clk_freq: GMU hub interface clock frequency */
	u64 gmu_hub_clk_freq;
	/* @patch_reglist: If false, the power up register list still needs to be patched */
	bool patch_reglist;
	/*
	 * @uche_client_pf: uche_client_pf client register configuration
	 * for pf debugging
	 */
	u32 uche_client_pf;
	/**
	 * @bcl_data: bit 0 contains the response type for bcl alarms and bits 1:24 control the
	 * throttle level for bcl alarm levels 0-2. If not set, gmu fw sets default throttle levels.
	 */
	u32 bcl_data;
	/*
	 * @bcl_debugfs_dir: Debugfs directory node for bcl related nodes
	 */
	struct dentry *bcl_debugfs_dir;
	/** @bcl_throttle_time_us: Total time in us spent in BCL throttling */
	u32 bcl_throttle_time_us;
	/* @preemption_debugfs_dir: Debugfs directory node for preemption related nodes */
	struct dentry *preemption_debugfs_dir;
	/* @hwsched_enabled: If true, hwsched is enabled */
	bool hwsched_enabled;
	/* @fastblend_enabled: True if the fastblend feature is enabled */
	bool fastblend_enabled;
	/* @raytracing_enabled: True if the raytracing feature is enabled */
	bool raytracing_enabled;
	/* @feature_fuse: feature fuse value read from HW */
	u32 feature_fuse;
	/** @gmu_ab: Track if the GMU supports ab votes */
	bool gmu_ab;
	/** @ifpc_hyst: IFPC long hysteresis value */
	u32 ifpc_hyst;
	/** @ifpc_hyst_floor: IFPC long hysteresis floor value */
	u32 ifpc_hyst_floor;
	/** @cx_misc_base: CX MISC register block base offset */
	u32 cx_misc_base;
	/*
	 * @no_restore_count: Keep track of perfcounter requests that don't have
	 * the ADRENO_PERFCOUNTER_GROUP_RESTORE flag set
	 */
	u32 no_restore_count;
	/*
	 * @ahb_timeout_val: AHB transaction timeout value.
	 * If set, a timeout will occur in 2 ^ (ahb_timeout_val + 1) cycles.
	 */
	u32 ahb_timeout_val;
};
/**
 * enum adreno_device_flags - Private flags for the adreno_device
 * @ADRENO_DEVICE_PWRON - Set during init after a power collapse
 * @ADRENO_DEVICE_PWRON_FIXUP - Set if the target requires the shader fixup
 * after power collapse
 * @ADRENO_DEVICE_STARTED - Set if the device start sequence is in progress
 * @ADRENO_DEVICE_FAULT - Set if the device is currently in fault (and shouldn't
 * send any more commands to the ringbuffer)
 * @ADRENO_DEVICE_DRAWOBJ_PROFILE - Set if the device supports drawobj
 * profiling via the ALWAYSON counter
 * @ADRENO_DEVICE_PREEMPTION - Turn on/off preemption
 * @ADRENO_DEVICE_SOFT_FAULT_DETECT - Set if soft fault detect is enabled
 * @ADRENO_DEVICE_GPMU_INITIALIZED - Set if GPMU firmware initialization
 * succeeded
 * @ADRENO_DEVICE_ISDB_ENABLED - Set if the Integrated Shader DeBugger is
 * attached and enabled
 * @ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED - Set if a CACHE_FLUSH_TS irq storm
 * is in progress
 */
enum adreno_device_flags {
	ADRENO_DEVICE_PWRON = 0,
	ADRENO_DEVICE_PWRON_FIXUP = 1,
	ADRENO_DEVICE_INITIALIZED = 2,
	ADRENO_DEVICE_STARTED = 5,
	ADRENO_DEVICE_FAULT = 6,
	ADRENO_DEVICE_DRAWOBJ_PROFILE = 7,
	ADRENO_DEVICE_GPU_REGULATOR_ENABLED = 8,
	ADRENO_DEVICE_PREEMPTION = 9,
	ADRENO_DEVICE_SOFT_FAULT_DETECT = 10,
	ADRENO_DEVICE_GPMU_INITIALIZED = 11,
	ADRENO_DEVICE_ISDB_ENABLED = 12,
	ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED = 13,
	/** @ADRENO_DEVICE_DMS: Set if DMS is enabled */
	ADRENO_DEVICE_DMS = 14,
	/** @ADRENO_DEVICE_GMU_AB: Set if AB vote via GMU is enabled */
	ADRENO_DEVICE_GMU_AB = 15,
	/*
	 * @ADRENO_DEVICE_FORCE_COLDBOOT: Set if a feature is toggled
	 * via sysfs/debugfs or when we are doing fault recovery
	 */
	ADRENO_DEVICE_FORCE_COLDBOOT = 16,
	/** @ADRENO_DEVICE_CX_TIMER_INITIALIZED: Set if the CX timer is initialized */
	ADRENO_DEVICE_CX_TIMER_INITIALIZED = 17,
};
/**
 * struct adreno_drawobj_profile_entry - a single drawobj entry in the
 * kernel profiling buffer
 * @started: Number of GPU ticks at start of the drawobj
 * @retired: Number of GPU ticks at the end of the drawobj
 * @ctx_start: CP_ALWAYS_ON_CONTEXT tick at start of the drawobj
 * @ctx_end: CP_ALWAYS_ON_CONTEXT tick at end of the drawobj
 */
struct adreno_drawobj_profile_entry {
	uint64_t started;
	uint64_t retired;
	uint64_t ctx_start;
	uint64_t ctx_end;
};

#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \
	((_index) * sizeof(struct adreno_drawobj_profile_entry) \
	+ offsetof(struct adreno_drawobj_profile_entry, _member))
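/*
 * Example (illustrative): the byte offset of the "retired" tick for entry 3
 * of the profiling buffer is
 *
 *	ADRENO_DRAWOBJ_PROFILE_OFFSET(3, retired)
 *		== 3 * sizeof(struct adreno_drawobj_profile_entry) + 8
 */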
/**
 * adreno_regs: List of registers that are used in the kgsl driver for all
 * 3D devices. Each device type has a different offset value for the same
 * register, so an array of register offsets is declared for every device
 * and indexed by the enumeration values defined in this enum
 */
enum adreno_regs {
	ADRENO_REG_CP_ME_RAM_DATA,
	ADRENO_REG_CP_RB_BASE,
	ADRENO_REG_CP_RB_BASE_HI,
	ADRENO_REG_CP_RB_RPTR_ADDR_LO,
	ADRENO_REG_CP_RB_RPTR_ADDR_HI,
	ADRENO_REG_CP_RB_RPTR,
	ADRENO_REG_CP_RB_WPTR,
	ADRENO_REG_CP_ME_CNTL,
	ADRENO_REG_CP_RB_CNTL,
	ADRENO_REG_CP_IB1_BASE,
	ADRENO_REG_CP_IB1_BASE_HI,
	ADRENO_REG_CP_IB1_BUFSZ,
	ADRENO_REG_CP_IB2_BASE,
	ADRENO_REG_CP_IB2_BASE_HI,
	ADRENO_REG_CP_IB2_BUFSZ,
	ADRENO_REG_CP_TIMESTAMP,
	ADRENO_REG_CP_SCRATCH_REG6,
	ADRENO_REG_CP_SCRATCH_REG7,
	ADRENO_REG_CP_PROTECT_STATUS,
	ADRENO_REG_CP_PREEMPT,
	ADRENO_REG_CP_PREEMPT_DEBUG,
	ADRENO_REG_CP_PREEMPT_DISABLE,
	ADRENO_REG_CP_PROTECT_REG_0,
	ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
	ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
	ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO,
	ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI,
	ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO,
	ADRENO_REG_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI,
	ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO,
	ADRENO_REG_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI,
	ADRENO_REG_CP_PREEMPT_LEVEL_STATUS,
	ADRENO_REG_RBBM_STATUS,
	ADRENO_REG_RBBM_STATUS3,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
	ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
	ADRENO_REG_RBBM_INT_0_MASK,
	ADRENO_REG_RBBM_PM_OVERRIDE2,
	ADRENO_REG_RBBM_SW_RESET_CMD,
	ADRENO_REG_RBBM_CLOCK_CTL,
	ADRENO_REG_PA_SC_AA_CONFIG,
	ADRENO_REG_SQ_GPR_MANAGEMENT,
	ADRENO_REG_SQ_INST_STORE_MANAGEMENT,
	ADRENO_REG_TP0_CHICKEN,
	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
	ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
	ADRENO_REG_GMU_AHB_FENCE_STATUS,
	ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
	ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
	ADRENO_REG_REGISTER_MAX,
};
#define ADRENO_REG_UNUSED 0xFFFFFFFF
#define ADRENO_REG_SKIP 0xFFFFFFFE
#define ADRENO_REG_DEFINE(_offset, _reg) [_offset] = _reg
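/*
 * Illustrative use (sketch; A5XX_CP_RB_BASE stands in for a hypothetical
 * target register constant): ADRENO_REG_DEFINE builds a designated
 * initializer, so per-target offset arrays stay indexed by enum adreno_regs:
 *
 *	static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 *		ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A5XX_CP_RB_BASE),
 *	};
 */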
struct adreno_irq_funcs {
	void (*func)(struct adreno_device *adreno_dev, int mask);
};
#define ADRENO_IRQ_CALLBACK(_c) { .func = _c }
/*
 * struct adreno_debugbus_block - Holds info about debug buses of a chip
 * @block_id: Bus identifier
 * @dwords: Number of dwords of data that this block holds
 */
struct adreno_debugbus_block {
	unsigned int block_id;
	unsigned int dwords;
};

enum adreno_cp_marker_type {
	IFPC_DISABLE,
	IFPC_ENABLE,
	IB1LIST_START,
	IB1LIST_END,
};
struct adreno_gpudev {
	/*
	 * These registers are in a different location on different devices,
	 * so define them in the structure and use them as variables.
	 */
	unsigned int *const reg_offsets;
	/* GPU specific function hooks */
	int (*probe)(struct platform_device *pdev, u32 chipid,
		const struct adreno_gpu_core *gpucore);
	void (*snapshot)(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot);
	irqreturn_t (*irq_handler)(struct adreno_device *adreno_dev);
	int (*init)(struct adreno_device *adreno_dev);
	void (*remove)(struct adreno_device *adreno_dev);
	int (*rb_start)(struct adreno_device *adreno_dev);
	int (*start)(struct adreno_device *adreno_dev);
	int (*regulator_enable)(struct adreno_device *adreno_dev);
	void (*regulator_disable)(struct adreno_device *adreno_dev);
	void (*pwrlevel_change_settings)(struct adreno_device *adreno_dev,
		unsigned int prelevel, unsigned int postlevel,
		bool post);
	void (*preemption_schedule)(struct adreno_device *adreno_dev);
	int (*preemption_context_init)(struct kgsl_context *context);
	void (*context_detach)(struct adreno_context *drawctxt);
	void (*pre_reset)(struct adreno_device *adreno_dev);
	void (*gpu_keepalive)(struct adreno_device *adreno_dev,
		bool state);
	bool (*hw_isidle)(struct adreno_device *adreno_dev);
	const char *(*iommu_fault_block)(struct kgsl_device *device,
		unsigned int fsynr1);
	int (*reset)(struct adreno_device *adreno_dev);
	/** @read_alwayson: Return the current value of the alwayson counter */
	u64 (*read_alwayson)(struct adreno_device *adreno_dev);
	/**
	 * @power_ops: Target specific function pointers to power up/down the
	 * gpu
	 */
	const struct adreno_power_ops *power_ops;
	int (*clear_pending_transactions)(struct adreno_device *adreno_dev);
	void (*deassert_gbif_halt)(struct adreno_device *adreno_dev);
	int (*ringbuffer_submitcmd)(struct adreno_device *adreno_dev,
		struct kgsl_drawobj_cmd *cmdobj, u32 flags,
		struct adreno_submit_time *time);
	/**
	 * @is_hw_collapsible: Return true if the hardware can be collapsed.
	 * Only used by non GMU/RGMU targets
	 */
	bool (*is_hw_collapsible)(struct adreno_device *adreno_dev);
	/**
	 * @power_stats: Return the perfcounter statistics for DCVS
	 */
	void (*power_stats)(struct adreno_device *adreno_dev,
		struct kgsl_power_stats *stats);
	int (*setproperty)(struct kgsl_device_private *priv, u32 type,
		void __user *value, u32 sizebytes);
	int (*add_to_va_minidump)(struct adreno_device *adreno_dev);
	/**
	 * @gx_is_on: Return true if both the gfx clock and the GX GDSC are enabled
	 */
	bool (*gx_is_on)(struct adreno_device *adreno_dev);
	/**
	 * @send_recurring_cmdobj: Target specific function to send recurring IBs to GMU
	 */
	int (*send_recurring_cmdobj)(struct adreno_device *adreno_dev,
		struct kgsl_drawobj_cmd *cmdobj);
	/**
	 * @perfcounter_remove: Remove perfcounter from the power up list
	 */
	int (*perfcounter_remove)(struct adreno_device *adreno_dev,
		struct adreno_perfcount_register *reg, u32 groupid);
	/**
	 * @set_isdb_breakpoint_registers: Program isdb registers to issue break command
	 */
	void (*set_isdb_breakpoint_registers)(struct adreno_device *adreno_dev);
	/**
	 * @context_destroy: Target specific function called during context destruction
	 */
	void (*context_destroy)(struct adreno_device *adreno_dev, struct adreno_context *drawctxt);
	/**
	 * @swfuse_irqctrl: To enable/disable sw fuse violation interrupt
	 */
	void (*swfuse_irqctrl)(struct adreno_device *adreno_dev, bool state);
	/**
	 * @lpac_store: To enable/disable lpac at runtime
	 */
	int (*lpac_store)(struct adreno_device *adreno_dev, bool enable);
	/*
	 * @get_uche_trap_base: Return the UCHE_TRAP_BASE value
	 */
	u64 (*get_uche_trap_base)(void);
	/**
	 * @fault_header: Print fault header
	 */
	void (*fault_header)(struct adreno_device *adreno_dev, struct kgsl_drawobj *drawobj);
	/**
	 * @lpac_fault_header: Print LPAC fault header
	 */
	void (*lpac_fault_header)(struct adreno_device *adreno_dev, struct kgsl_drawobj *drawobj);
};
/**
 * enum kgsl_ft_policy_bits - KGSL fault tolerance policy bits
 * @KGSL_FT_OFF: Disable fault detection (not used)
 * @KGSL_FT_REPLAY: Replay the faulting command
 * @KGSL_FT_SKIPIB: Skip the faulting indirect buffer
 * @KGSL_FT_SKIPFRAME: Skip the frame containing the faulting IB
 * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command obj
 * @KGSL_FT_TEMP_DISABLE: Disables FT for all commands
 * @KGSL_FT_THROTTLE: Disable the context if it faults too often
 * @KGSL_FT_SKIPCMD: Skip the command containing the faulting IB
 */
enum kgsl_ft_policy_bits {
	KGSL_FT_OFF = 0,
	KGSL_FT_REPLAY,
	KGSL_FT_SKIPIB,
	KGSL_FT_SKIPFRAME,
	KGSL_FT_DISABLE,
	KGSL_FT_TEMP_DISABLE,
	KGSL_FT_THROTTLE,
	KGSL_FT_SKIPCMD,

	/* KGSL_FT_MAX_BITS is used to calculate the mask */
	KGSL_FT_MAX_BITS,

	/* Internal bits - set during GFT */
	/* Skip the PM dump on replayed command objs */
	KGSL_FT_SKIP_PMDUMP = 31,
};

#define KGSL_FT_POLICY_MASK GENMASK(KGSL_FT_MAX_BITS - 1, 0)
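/*
 * Example test (sketch): policies are stored as bit positions in an
 * unsigned long, so a single policy bit can be checked with test_bit():
 *
 *	if (test_bit(KGSL_FT_REPLAY, &adreno_dev->ft_policy))
 *		... replay the faulting command ...
 */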
#define FOR_EACH_RINGBUFFER(_dev, _rb, _i) \
	for ((_i) = 0, (_rb) = &((_dev)->ringbuffers[0]); \
		(_i) < (_dev)->num_ringbuffers; \
		(_i)++, (_rb)++)
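/*
 * Typical iteration (sketch; dump_ringbuffer() is a hypothetical helper):
 * visits only the ringbuffers that were initialized for this target.
 *
 *	struct adreno_ringbuffer *rb;
 *	int i;
 *
 *	FOR_EACH_RINGBUFFER(adreno_dev, rb, i)
 *		dump_ringbuffer(rb);
 */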
extern const struct adreno_power_ops adreno_power_operations;

extern const struct adreno_gpudev adreno_a3xx_gpudev;
extern const struct adreno_gpudev adreno_a5xx_gpudev;
extern const struct adreno_gpudev adreno_a6xx_gpudev;
extern const struct adreno_gpudev adreno_a6xx_rgmu_gpudev;
extern const struct adreno_gpudev adreno_a619_holi_gpudev;
extern const struct adreno_gpudev adreno_a611_gpudev;

extern int adreno_wake_nice;
extern unsigned int adreno_wake_timeout;

int adreno_start(struct kgsl_device *device, int priority);
long adreno_ioctl(struct kgsl_device_private *dev_priv,
	unsigned int cmd, unsigned long arg);
long adreno_ioctl_helper(struct kgsl_device_private *dev_priv,
	unsigned int cmd, unsigned long arg,
	const struct kgsl_ioctl *cmds, int len);
int adreno_spin_idle(struct adreno_device *device, unsigned int timeout);
int adreno_idle(struct kgsl_device *device);
int adreno_set_constraint(struct kgsl_device *device,
	struct kgsl_context *context,
	struct kgsl_device_constraint *constraint);
void adreno_snapshot(struct kgsl_device *device,
	struct kgsl_snapshot *snapshot,
	struct kgsl_context *context, struct kgsl_context *context_lpac);
int adreno_reset(struct kgsl_device *device, int fault);
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
	struct adreno_context *drawctxt,
	struct kgsl_drawobj *drawobj);
void adreno_hang_int_callback(struct adreno_device *adreno_dev, int bit);
void adreno_cp_callback(struct adreno_device *adreno_dev, int bit);
int adreno_sysfs_init(struct adreno_device *adreno_dev);
void adreno_irqctrl(struct adreno_device *adreno_dev, int state);
long adreno_ioctl_perfcounter_get(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data);
long adreno_ioctl_perfcounter_put(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data);
void adreno_cx_misc_regread(struct adreno_device *adreno_dev,
	unsigned int offsetwords, unsigned int *value);
void adreno_cx_misc_regwrite(struct adreno_device *adreno_dev,
	unsigned int offsetwords, unsigned int value);
void adreno_cx_misc_regrmw(struct adreno_device *adreno_dev,
	unsigned int offsetwords,
	unsigned int mask, unsigned int bits);
void adreno_isense_regread(struct adreno_device *adreno_dev,
	unsigned int offsetwords, unsigned int *value);
bool adreno_gx_is_on(struct adreno_device *adreno_dev);
u64 adreno_read_cx_timer(struct adreno_device *adreno_dev);
/**
 * adreno_active_count_get - Wrapper for target specific active count get
 * @adreno_dev: pointer to the adreno device
 *
 * Increase the active count for the KGSL device and execute slumber exit
 * sequence if this is the first reference. Code paths that need to touch the
 * hardware or wait for the hardware to complete an operation must hold an
 * active count reference until they are finished. The device mutex must be held
 * while calling this function.
 *
 * Return: 0 on success or negative error on failure to wake up the device
 */
int adreno_active_count_get(struct adreno_device *adreno_dev);

/**
 * adreno_active_count_put - Wrapper for target specific active count put
 * @adreno_dev: pointer to the adreno device
 *
 * Decrease the active count for the KGSL device and schedule the idle thread
 * to execute the slumber sequence if there are no remaining references. The
 * device mutex must be held while calling this function.
 */
void adreno_active_count_put(struct adreno_device *adreno_dev);
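/*
 * Canonical usage pattern (sketch; error handling abbreviated): take the
 * device mutex, hold an active count across the hardware access, then
 * release both.
 *
 *	mutex_lock(&device->mutex);
 *	ret = adreno_active_count_get(adreno_dev);
 *	if (!ret) {
 *		... touch hardware ...
 *		adreno_active_count_put(adreno_dev);
 *	}
 *	mutex_unlock(&device->mutex);
 */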
#define ADRENO_TARGET(_name, _id) \
static inline int adreno_is_##_name(struct adreno_device *adreno_dev) \
{ \
	return (ADRENO_GPUREV(adreno_dev) == (_id)); \
}
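/*
 * For example, ADRENO_TARGET(a610, ADRENO_REV_A610) below expands to:
 *
 *	static inline int adreno_is_a610(struct adreno_device *adreno_dev)
 *	{
 *		return (ADRENO_GPUREV(adreno_dev) == (ADRENO_REV_A610));
 *	}
 */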
static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
{
	return ((ADRENO_GPUREV(adreno_dev) >= 300) &&
		(ADRENO_GPUREV(adreno_dev) < 400));
}

ADRENO_TARGET(a304, ADRENO_REV_A304)
ADRENO_TARGET(a306, ADRENO_REV_A306)
ADRENO_TARGET(a306a, ADRENO_REV_A306A)

static inline int adreno_is_a5xx(struct adreno_device *adreno_dev)
{
	return ADRENO_GPUREV(adreno_dev) >= 500 &&
		ADRENO_GPUREV(adreno_dev) < 600;
}

ADRENO_TARGET(a505, ADRENO_REV_A505)
ADRENO_TARGET(a506, ADRENO_REV_A506)
ADRENO_TARGET(a508, ADRENO_REV_A508)
ADRENO_TARGET(a510, ADRENO_REV_A510)
ADRENO_TARGET(a512, ADRENO_REV_A512)
ADRENO_TARGET(a530, ADRENO_REV_A530)
ADRENO_TARGET(a540, ADRENO_REV_A540)

static inline int adreno_is_a530v2(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A530) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1);
}

static inline int adreno_is_a530v3(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A530) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 2);
}

static inline int adreno_is_a505_or_a506(struct adreno_device *adreno_dev)
{
	return ADRENO_GPUREV(adreno_dev) >= 505 &&
		ADRENO_GPUREV(adreno_dev) <= 506;
}

static inline int adreno_is_a6xx(struct adreno_device *adreno_dev)
{
	return ADRENO_GPUREV(adreno_dev) >= 600 &&
		ADRENO_GPUREV(adreno_dev) <= 702;
}

static inline int adreno_is_a660_shima(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A660) &&
		(adreno_dev->gpucore->compatible &&
		 !strcmp(adreno_dev->gpucore->compatible,
			 "qcom,adreno-gpu-a660-shima"));
}

ADRENO_TARGET(a610, ADRENO_REV_A610)
ADRENO_TARGET(a611, ADRENO_REV_A611)
ADRENO_TARGET(a612, ADRENO_REV_A612)
ADRENO_TARGET(a618, ADRENO_REV_A618)
ADRENO_TARGET(a619, ADRENO_REV_A619)
ADRENO_TARGET(a621, ADRENO_REV_A621)
ADRENO_TARGET(a630, ADRENO_REV_A630)
ADRENO_TARGET(a635, ADRENO_REV_A635)
ADRENO_TARGET(a662, ADRENO_REV_A662)
ADRENO_TARGET(a640, ADRENO_REV_A640)
ADRENO_TARGET(a650, ADRENO_REV_A650)
ADRENO_TARGET(a663, ADRENO_REV_A663)
ADRENO_TARGET(a680, ADRENO_REV_A680)
ADRENO_TARGET(a702, ADRENO_REV_A702)

/* A635 and A662 are derived from A660 and share the same logic */
static inline int adreno_is_a660(struct adreno_device *adreno_dev)
{
	unsigned int rev = ADRENO_GPUREV(adreno_dev);

	return (rev == ADRENO_REV_A660 || rev == ADRENO_REV_A635 ||
		rev == ADRENO_REV_A662);
}
/*
 * All chipsets derived from A615, such as A616, A618 and A619, need to be
 * added to this list.
 */
static inline int adreno_is_a615_family(struct adreno_device *adreno_dev)
{
	unsigned int rev = ADRENO_GPUREV(adreno_dev);

	return (rev == ADRENO_REV_A615 || rev == ADRENO_REV_A616 ||
		rev == ADRENO_REV_A618 || rev == ADRENO_REV_A619);
}

/*
 * GPUs derived from A640 need to be added to this list.
 * A640 and A680 belong to this family.
 */
static inline int adreno_is_a640_family(struct adreno_device *adreno_dev)
{
	unsigned int rev = ADRENO_GPUREV(adreno_dev);

	return (rev == ADRENO_REV_A640 || rev == ADRENO_REV_A680);
}

/*
 * GPUs derived from A650 need to be added to this list.
 * A650 is derived from A640, but its register specs have changed, so it
 * does not belong to the A640 family. A620, A621, A660, A663 and A690
 * follow the register specs of A650.
 */
static inline int adreno_is_a650_family(struct adreno_device *adreno_dev)
{
	unsigned int rev = ADRENO_GPUREV(adreno_dev);

	return (rev == ADRENO_REV_A650 || rev == ADRENO_REV_A620 ||
		rev == ADRENO_REV_A660 || rev == ADRENO_REV_A635 ||
		rev == ADRENO_REV_A662 || rev == ADRENO_REV_A621 ||
		rev == ADRENO_REV_A663);
}
static inline int adreno_is_a619_holi(struct adreno_device *adreno_dev)
{
	return of_device_is_compatible(adreno_dev->dev.pdev->dev.of_node,
			"qcom,adreno-gpu-a619-holi");
}

static inline int adreno_is_a620(struct adreno_device *adreno_dev)
{
	unsigned int rev = ADRENO_GPUREV(adreno_dev);

	return (rev == ADRENO_REV_A620 || rev == ADRENO_REV_A621);
}

static inline int adreno_is_a610_family(struct adreno_device *adreno_dev)
{
	unsigned int rev = ADRENO_GPUREV(adreno_dev);

	return (rev == ADRENO_REV_A610 || rev == ADRENO_REV_A611);
}

static inline int adreno_is_a640v2(struct adreno_device *adreno_dev)
{
	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A640) &&
		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1);
}

static inline int adreno_is_gen7(struct adreno_device *adreno_dev)
{
	return ADRENO_GPUREV(adreno_dev) >= 0x070000 &&
		ADRENO_GPUREV(adreno_dev) < 0x080000;
}

static inline int adreno_is_gen8(struct adreno_device *adreno_dev)
{
	return ADRENO_GPUREV(adreno_dev) >= 0x080000 &&
		ADRENO_GPUREV(adreno_dev) < 0x090000;
}

ADRENO_TARGET(gen7_0_0, ADRENO_REV_GEN7_0_0)
ADRENO_TARGET(gen7_0_1, ADRENO_REV_GEN7_0_1)
ADRENO_TARGET(gen7_2_0, ADRENO_REV_GEN7_2_0)
ADRENO_TARGET(gen7_2_1, ADRENO_REV_GEN7_2_1)
ADRENO_TARGET(gen7_4_0, ADRENO_REV_GEN7_4_0)
ADRENO_TARGET(gen7_9_0, ADRENO_REV_GEN7_9_0)
ADRENO_TARGET(gen7_9_1, ADRENO_REV_GEN7_9_1)
ADRENO_TARGET(gen7_11_0, ADRENO_REV_GEN7_11_0)
ADRENO_TARGET(gen8_3_0, ADRENO_REV_GEN8_3_0)

static inline int adreno_is_gen7_9_x(struct adreno_device *adreno_dev)
{
	return adreno_is_gen7_9_0(adreno_dev) || adreno_is_gen7_9_1(adreno_dev);
}

static inline int adreno_is_gen7_0_x_family(struct adreno_device *adreno_dev)
{
	return adreno_is_gen7_0_0(adreno_dev) || adreno_is_gen7_0_1(adreno_dev) ||
		adreno_is_gen7_4_0(adreno_dev);
}

static inline int adreno_is_gen7_2_x_family(struct adreno_device *adreno_dev)
{
	return adreno_is_gen7_2_0(adreno_dev) || adreno_is_gen7_2_1(adreno_dev) ||
		adreno_is_gen7_9_x(adreno_dev) || adreno_is_gen7_11_0(adreno_dev);
}
/*
 * adreno_checkreg_off() - Checks the validity of a register enum
 * @adreno_dev: Pointer to adreno device
 * @offset_name: The register enum that is checked
 */
static inline bool adreno_checkreg_off(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name)
{
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (offset_name >= ADRENO_REG_REGISTER_MAX ||
		gpudev->reg_offsets[offset_name] == ADRENO_REG_UNUSED)
		return false;

	/*
	 * GPU register programming is kept common as much as possible
	 * across the cores. Use ADRENO_REG_SKIP when certain register
	 * programming needs to be skipped for certain GPU cores.
	 * Example: Certain registers on a5xx like IB1_BASE are 64 bit.
	 * The common code programs the register as 64 bit, but the upper
	 * 32 bits are skipped on a3xx using ADRENO_REG_SKIP.
	 */
	if (gpudev->reg_offsets[offset_name] == ADRENO_REG_SKIP)
		return false;

	return true;
}
/*
 * adreno_readreg() - Read a register by getting its offset from the
 * offset array defined in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum that is to be read
 * @val: Register value read is placed here
 */
static inline void adreno_readreg(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name, unsigned int *val)
{
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_regread(KGSL_DEVICE(adreno_dev),
				gpudev->reg_offsets[offset_name], val);
	else
		*val = 0;
}

/*
 * adreno_writereg() - Write a register by getting its offset from the
 * offset array defined in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum that is to be written
 * @val: Value to write
 */
static inline void adreno_writereg(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name, unsigned int val)
{
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_regwrite(KGSL_DEVICE(adreno_dev),
				gpudev->reg_offsets[offset_name], val);
}
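
/*
 * A minimal usage sketch (hypothetical caller; ADRENO_REG_CP_RB_WPTR is
 * just one example enum): reads and writes go through the per-target
 * offset table, so callers never hard-code register offsets:
 *
 *	unsigned int wptr;
 *
 *	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
 *	adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, wptr);
 *
 * Enums a core marks ADRENO_REG_UNUSED or ADRENO_REG_SKIP read back as 0,
 * and writes to them are silently dropped.
 */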
/*
 * adreno_getreg() - Returns the offset value of a register from the
 * register offset array in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum whose offset is returned
 */
static inline unsigned int adreno_getreg(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name)
{
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (!adreno_checkreg_off(adreno_dev, offset_name))
		return ADRENO_REG_REGISTER_MAX;

	return gpudev->reg_offsets[offset_name];
}
/*
 * adreno_write_gmureg() - Write a GMU register by getting its offset from the
 * offset array defined in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum that is to be written
 * @val: Value to write
 */
static inline void adreno_write_gmureg(struct adreno_device *adreno_dev,
		enum adreno_regs offset_name, unsigned int val)
{
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

	if (adreno_checkreg_off(adreno_dev, offset_name))
		gmu_core_regwrite(KGSL_DEVICE(adreno_dev),
				gpudev->reg_offsets[offset_name], val);
}
/**
 * adreno_gpu_fault() - Return the current fault state of the GPU
 * @adreno_dev: A pointer to the adreno_device to query
 *
 * Return: 0 if there is no fault, or a positive value encoding the type of
 * the last fault that occurred
 */
static inline unsigned int adreno_gpu_fault(struct adreno_device *adreno_dev)
{
	/* make sure we're reading the latest value */
	smp_rmb();
	return atomic_read(&adreno_dev->dispatcher.fault);
}

/**
 * adreno_set_gpu_fault() - Set the current fault status of the GPU
 * @adreno_dev: A pointer to the adreno_device to set
 * @state: fault state to set
 */
static inline void adreno_set_gpu_fault(struct adreno_device *adreno_dev,
		int state)
{
	/* only set the fault bit w/o overwriting other bits */
	atomic_or(state, &adreno_dev->dispatcher.fault);
	/* make sure other CPUs see the update */
	smp_wmb();
}

/**
 * adreno_clear_gpu_fault() - Clear the GPU fault register
 * @adreno_dev: A pointer to an adreno_device structure
 *
 * Clear the GPU fault status for the adreno device
 */
static inline void adreno_clear_gpu_fault(struct adreno_device *adreno_dev)
{
	atomic_set(&adreno_dev->dispatcher.fault, 0);
	/* make sure other CPUs see the update */
	smp_wmb();
}
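
/*
 * A minimal sketch of how the fault helpers above work together in a
 * hypothetical recovery flow (ADRENO_HARD_FAULT stands in for whichever
 * fault type the caller reports): producers OR a fault bit in, the
 * dispatcher polls for it, and recovery clears it once handled:
 *
 *	adreno_set_gpu_fault(adreno_dev, ADRENO_HARD_FAULT);
 *	...
 *	if (adreno_gpu_fault(adreno_dev) != 0)
 *		... run recovery, then adreno_clear_gpu_fault(adreno_dev) ...
 *
 * The smp_rmb()/smp_wmb() pairs order the atomic operations against
 * surrounding accesses so other CPUs observe a consistent fault state.
 */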
/**
 * adreno_gpu_halt() - Return the GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 */
static inline int adreno_gpu_halt(struct adreno_device *adreno_dev)
{
	/* make sure we're reading the latest value */
	smp_rmb();
	return atomic_read(&adreno_dev->halt);
}

/**
 * adreno_clear_gpu_halt() - Clear the GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 */
static inline void adreno_clear_gpu_halt(struct adreno_device *adreno_dev)
{
	atomic_set(&adreno_dev->halt, 0);
	/* make sure other CPUs see the update */
	smp_wmb();
}

/**
 * adreno_get_gpu_halt() - Increment GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 */
static inline void adreno_get_gpu_halt(struct adreno_device *adreno_dev)
{
	atomic_inc(&adreno_dev->halt);
}

/**
 * adreno_put_gpu_halt() - Decrement GPU halt refcount
 * @adreno_dev: A pointer to the adreno_device
 */
static inline void adreno_put_gpu_halt(struct adreno_device *adreno_dev)
{
	/* Make sure the refcount is good */
	int ret = atomic_dec_if_positive(&adreno_dev->halt);

	WARN(ret < 0, "GPU halt refcount unbalanced\n");
}
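
/*
 * A minimal usage sketch (hypothetical caller): the halt refcount lets any
 * number of paths block new dispatch, and dispatch resumes once the count
 * drops back to zero:
 *
 *	adreno_get_gpu_halt(adreno_dev);
 *	... do work that must not race with new submissions ...
 *	adreno_put_gpu_halt(adreno_dev);
 *
 * adreno_put_gpu_halt() WARNs instead of going negative when the refcount
 * is already zero, which catches unbalanced get/put pairs.
 */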
#ifdef CONFIG_DEBUG_FS
void adreno_debugfs_init(struct adreno_device *adreno_dev);
void adreno_context_debugfs_init(struct adreno_device *adreno_dev,
		struct adreno_context *ctx);
#else
static inline void adreno_debugfs_init(struct adreno_device *adreno_dev) { }
static inline void adreno_context_debugfs_init(struct adreno_device *device,
		struct adreno_context *context)
{
	context->debug_root = NULL;
}
#endif
/**
 * adreno_compare_pm4_version() - Compare the PM4 microcode version
 * @adreno_dev: Pointer to the adreno_device struct
 * @version: Version number to compare against
 *
 * Compare the current version against the specified version and return -1 if
 * the current code is older, 0 if equal or 1 if newer.
 */
static inline int adreno_compare_pm4_version(struct adreno_device *adreno_dev,
		unsigned int version)
{
	if (adreno_dev->fw[ADRENO_FW_PM4].version == version)
		return 0;

	return (adreno_dev->fw[ADRENO_FW_PM4].version > version) ? 1 : -1;
}

/**
 * adreno_compare_pfp_version() - Compare the PFP microcode version
 * @adreno_dev: Pointer to the adreno_device struct
 * @version: Version number to compare against
 *
 * Compare the current version against the specified version and return -1 if
 * the current code is older, 0 if equal or 1 if newer.
 */
static inline int adreno_compare_pfp_version(struct adreno_device *adreno_dev,
		unsigned int version)
{
	if (adreno_dev->fw[ADRENO_FW_PFP].version == version)
		return 0;

	return (adreno_dev->fw[ADRENO_FW_PFP].version > version) ? 1 : -1;
}
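
/*
 * A minimal usage sketch (hypothetical feature gate; the version number is
 * illustrative, not a real cutoff): enable a feature only when the loaded
 * PM4 microcode is at least a given version:
 *
 *	if (adreno_compare_pm4_version(adreno_dev, 0x16000000) >= 0)
 *		... microcode is new enough for the feature ...
 */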
/**
 * adreno_in_preempt_state() - Check if preemption state is equal to given state
 * @adreno_dev: Device whose preemption state is checked
 * @state: State to compare against
 */
static inline bool adreno_in_preempt_state(struct adreno_device *adreno_dev,
		enum adreno_preempt_states state)
{
	return atomic_read(&adreno_dev->preempt.state) == state;
}

/**
 * adreno_set_preempt_state() - Set the specified preemption state
 * @adreno_dev: Device to change preemption state
 * @state: State to set
 */
static inline void adreno_set_preempt_state(struct adreno_device *adreno_dev,
		enum adreno_preempt_states state)
{
	/*
	 * atomic_set doesn't use barriers, so we need to do it ourselves. One
	 * before...
	 */
	smp_wmb();
	atomic_set(&adreno_dev->preempt.state, state);
	/* ... and one after */
	smp_wmb();
}
static inline bool adreno_is_preemption_enabled(
		struct adreno_device *adreno_dev)
{
	return test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
}

/**
 * adreno_preemption_feature_set() - Check whether the adreno preemption
 * feature is statically enabled, either via the adreno feature bit or via
 * the cmdline override
 * @adreno_dev: Device whose preemption state is checked
 */
static inline bool adreno_preemption_feature_set(struct adreno_device *adreno_dev)
{
	return ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION) || adreno_dev->preempt_override;
}
/*
 * adreno_compare_prio_level() - Compare two priority levels based on enum values
 * @p1: First priority level
 * @p2: Second priority level
 *
 * Return: greater than 0 if p1 is higher priority, 0 if the levels are equal,
 * less than 0 otherwise
 */
static inline int adreno_compare_prio_level(int p1, int p2)
{
	return p2 - p1;
}
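
/*
 * Note the inverted sense: lower numeric values are higher priority. For
 * example, adreno_compare_prio_level(0, 2) returns 2 (greater than 0),
 * meaning priority level 0 outranks level 2.
 */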
void adreno_readreg64(struct adreno_device *adreno_dev,
		enum adreno_regs lo, enum adreno_regs hi, uint64_t *val);

void adreno_writereg64(struct adreno_device *adreno_dev,
		enum adreno_regs lo, enum adreno_regs hi, uint64_t val);

unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb);

void adreno_touch_wake(struct kgsl_device *device);

static inline bool adreno_rb_empty(struct adreno_ringbuffer *rb)
{
	return (adreno_get_rptr(rb) == rb->wptr);
}

static inline bool adreno_soft_fault_detect(struct adreno_device *adreno_dev)
{
	return adreno_dev->fast_hang_detect &&
		!test_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv);
}

static inline bool adreno_long_ib_detect(struct adreno_device *adreno_dev)
{
	return adreno_dev->long_ib_detect &&
		!test_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv);
}
/**
 * adreno_support_64bit - Return true if the GPU supports 64 bit addressing
 * @adreno_dev: An Adreno GPU device handle
 *
 * Return: True if the device supports 64 bit addressing
 */
static inline bool adreno_support_64bit(struct adreno_device *adreno_dev)
{
	/*
	 * The IOMMU API takes an unsigned long for the iova, so we can't
	 * support 64 bit addresses when the kernel is in 32 bit mode even if
	 * we wanted to. Check that we are using a5xx or newer and that the
	 * unsigned long is big enough for our purposes.
	 */
	return (BITS_PER_LONG > 32 && ADRENO_GPUREV(adreno_dev) >= 500);
}
static inline void adreno_ringbuffer_set_pagetable(struct kgsl_device *device,
		struct adreno_ringbuffer *rb, struct kgsl_pagetable *pt)
{
	unsigned long flags;

	spin_lock_irqsave(&rb->preempt_lock, flags);

	kgsl_sharedmem_writel(device->scratch,
		SCRATCH_RB_OFFSET(rb->id, current_rb_ptname), pt->name);

	kgsl_sharedmem_writeq(device->scratch,
		SCRATCH_RB_OFFSET(rb->id, ttbr0),
		kgsl_mmu_pagetable_get_ttbr0(pt));

	kgsl_sharedmem_writel(device->scratch,
		SCRATCH_RB_OFFSET(rb->id, contextidr), 0);

	spin_unlock_irqrestore(&rb->preempt_lock, flags);
}
static inline u32 counter_delta(struct kgsl_device *device,
		unsigned int reg, unsigned int *counter)
{
	u32 val, ret = 0;

	if (!reg)
		return 0;

	kgsl_regread(device, reg, &val);

	if (*counter) {
		if (val >= *counter)
			ret = val - *counter;
		else
			ret = (UINT_MAX - *counter) + val;
	}

	*counter = val;
	return ret;
}
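
/*
 * counter_delta() tolerates 32-bit counter wraparound. A worked example of
 * the wrap branch above: with *counter == 0xfffffff0 and a new reading of
 * val == 0x10, the returned delta is
 * (UINT_MAX - 0xfffffff0) + 0x10 = 0xf + 0x10 = 0x1f.
 */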
static inline int adreno_perfcntr_active_oob_get(
		struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret = adreno_active_count_get(adreno_dev);

	if (!ret) {
		ret = gmu_core_dev_oob_set(device, oob_perfcntr);
		if (ret)
			adreno_active_count_put(adreno_dev);
	}

	return ret;
}

static inline void adreno_perfcntr_active_oob_put(
		struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	gmu_core_dev_oob_clear(device, oob_perfcntr);
	adreno_active_count_put(adreno_dev);
}
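
/*
 * A minimal usage sketch (hypothetical perfcounter read): the OOB get/put
 * pair keeps the GPU powered up and tells the GMU to keep its hands off
 * the perfcounters while the kernel reads them:
 *
 *	if (!adreno_perfcntr_active_oob_get(adreno_dev)) {
 *		... read perfcounter registers ...
 *		adreno_perfcntr_active_oob_put(adreno_dev);
 *	}
 */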
/**
 * adreno_wait_for_halt_ack - wait for acknowledgement of a bus halt request
 * @device: Pointer to the KGSL device
 * @ack_reg: register offset to wait for acknowledgement on
 * @mask: A mask value to wait for
 *
 * Return: 0 on success or -ETIMEDOUT if the request timed out
 */
static inline int adreno_wait_for_halt_ack(struct kgsl_device *device,
		int ack_reg, unsigned int mask)
{
	u32 val;
	int ret = kgsl_regmap_read_poll_timeout(&device->regmap, ack_reg,
			val, (val & mask) == mask, 100, 100 * 1000);

	if (ret)
		dev_err(device->dev,
			"GBIF/VBIF Halt ack timeout: reg=%08x mask=%08x status=%08x\n",
			ack_reg, mask, val);

	return ret;
}
/**
 * adreno_move_preempt_state - Update the preemption state
 * @adreno_dev: An Adreno GPU device handle
 * @old: The current state of the preemption
 * @new: The new state of the preemption
 *
 * Return: True if the state was updated or false if not
 */
static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
		enum adreno_preempt_states old, enum adreno_preempt_states new)
{
	return (atomic_cmpxchg(&adreno_dev->preempt.state, old, new) == old);
}
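
/*
 * A minimal usage sketch (hypothetical transition using the NONE and START
 * states): move the state machine forward only if no other path changed it
 * first:
 *
 *	if (!adreno_move_preempt_state(adreno_dev,
 *			ADRENO_PREEMPT_NONE, ADRENO_PREEMPT_START))
 *		return;
 *
 * The compare-and-swap makes the preemption state machine safe against
 * concurrent updaters without taking a lock.
 */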
/**
 * adreno_reg_offset_init - Helper function to initialize reg_offsets
 * @reg_offsets: Pointer to an array of register offsets
 *
 * Helper function to set up register offsets for a target. Go through
 * the list and set ADRENO_REG_UNUSED for all unused entries.
 */
static inline void adreno_reg_offset_init(u32 *reg_offsets)
{
	int i;

	/*
	 * Initialize uninitialized gpu registers; this only needs to be done
	 * once. Set all offsets that are not initialized to ADRENO_REG_UNUSED.
	 */
	for (i = 0; i < ADRENO_REG_REGISTER_MAX; i++) {
		if (!reg_offsets[i])
			reg_offsets[i] = ADRENO_REG_UNUSED;
	}
}
static inline u32 adreno_get_level(struct kgsl_context *context)
{
	u32 level;

	if (kgsl_context_is_lpac(context))
		return KGSL_LPAC_RB_ID;

	level = context->priority / KGSL_PRIORITY_MAX_RB_LEVELS;

	return min_t(u32, level, KGSL_PRIORITY_MAX_RB_LEVELS - 1);
}
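
/*
 * A worked example, assuming KGSL_PRIORITY_MAX_RB_LEVELS is 4: a context
 * with priority 9 maps to level 9 / 4 = 2, and any priority large enough
 * to overshoot is clamped to KGSL_PRIORITY_MAX_RB_LEVELS - 1 = 3. LPAC
 * contexts skip the calculation entirely and always use KGSL_LPAC_RB_ID.
 */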
/**
 * adreno_get_firmware - Load firmware into an adreno_firmware struct
 * @adreno_dev: An Adreno GPU device handle
 * @fwfile: Firmware file to load
 * @firmware: A &struct adreno_firmware container for the firmware.
 *
 * Load the specified firmware file into the memdesc in &struct adreno_firmware
 * and get the size and version from the data.
 *
 * Return: 0 on success or negative on failure
 */
int adreno_get_firmware(struct adreno_device *adreno_dev,
		const char *fwfile, struct adreno_firmware *firmware);
/**
 * adreno_zap_shader_load - Helper function for loading the zap shader
 * @adreno_dev: A handle to an Adreno GPU device
 * @name: Name of the zap shader to load
 *
 * A target independent helper function for loading the zap shader.
 *
 * Return: 0 on success or negative on failure.
 */
int adreno_zap_shader_load(struct adreno_device *adreno_dev,
		const char *name);
/**
 * adreno_irq_callbacks - Helper function to handle IRQ callbacks
 * @adreno_dev: Adreno GPU device handle
 * @funcs: List of callback functions
 * @status: Interrupt status
 *
 * Walk the bits in the interrupt status and call any applicable callbacks.
 *
 * Return: IRQ_HANDLED if one or more interrupt callbacks were called.
 */
irqreturn_t adreno_irq_callbacks(struct adreno_device *adreno_dev,
		const struct adreno_irq_funcs *funcs, u32 status);

/**
 * adreno_device_probe - Generic adreno device probe function
 * @pdev: Pointer to the platform device
 * @adreno_dev: Adreno GPU device handle
 *
 * Do the generic setup for the Adreno device. Called from the target specific
 * probe functions.
 *
 * Return: 0 on success or negative on failure
 */
int adreno_device_probe(struct platform_device *pdev,
		struct adreno_device *adreno_dev);
/**
 * adreno_power_cycle - Suspend and resume the device
 * @adreno_dev: Pointer to the adreno device
 * @callback: Function that needs to be executed
 * @priv: Argument to be passed to the callback
 *
 * Certain properties that can be set via sysfs need to power
 * cycle the device to take effect. This function suspends
 * the device, executes the callback, and resumes the device.
 *
 * Return: 0 on success or negative on failure
 */
int adreno_power_cycle(struct adreno_device *adreno_dev,
		void (*callback)(struct adreno_device *adreno_dev, void *priv),
		void *priv);

/**
 * adreno_power_cycle_bool - Power cycle the device to change device setting
 * @adreno_dev: Pointer to the adreno device
 * @flag: Flag that needs to be set
 * @val: The value the flag should be set to
 *
 * Certain properties that can be set via sysfs need to power cycle the device
 * to take effect. This function suspends the device, sets the flag, and
 * resumes the device.
 *
 * Return: 0 on success or negative on failure
 */
int adreno_power_cycle_bool(struct adreno_device *adreno_dev,
		bool *flag, bool val);

/**
 * adreno_power_cycle_u32 - Power cycle the device to change device setting
 * @adreno_dev: Pointer to the adreno device
 * @flag: Flag that needs to be set
 * @val: The value the flag should be set to
 *
 * Certain properties that can be set via sysfs need to power cycle the device
 * to take effect. This function suspends the device, sets the flag, and
 * resumes the device.
 *
 * Return: 0 on success or negative on failure
 */
int adreno_power_cycle_u32(struct adreno_device *adreno_dev,
		u32 *flag, u32 val);
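
/*
 * A minimal usage sketch (hypothetical sysfs store handler): flip a
 * setting that only takes effect across a power cycle:
 *
 *	ret = adreno_power_cycle_bool(adreno_dev,
 *			&adreno_dev->long_ib_detect, val);
 *
 * The helper suspends the device, updates the flag, and resumes it, so the
 * new value is picked up on the next power-up.
 */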
/**
 * adreno_set_active_ctxs_null - Give up active context refcount
 * @adreno_dev: Adreno GPU device handle
 *
 * This puts back the reference for the last active context on
 * each ringbuffer when going in and out of slumber.
 */
void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev);

/**
 * adreno_get_bus_counters - Allocate the bus dcvs counters
 * @adreno_dev: Adreno GPU device handle
 *
 * This function allocates the various gpu counters to measure
 * gpu bus usage for bus dcvs.
 */
void adreno_get_bus_counters(struct adreno_device *adreno_dev);

/**
 * adreno_suspend_context - Make sure device is idle
 * @device: Pointer to the kgsl device
 *
 * This function processes the profiling results and checks if the
 * device is idle so that it can be turned off safely.
 *
 * Return: 0 on success or negative error on failure
 */
int adreno_suspend_context(struct kgsl_device *device);

/*
 * adreno_profile_submit_time - Populate profiling buffer with timestamps
 * @time: Container for the statistics
 *
 * Populate the draw object user profiling buffer with the timestamps
 * recorded in the adreno_submit_time structure at the time of draw object
 * submission.
 */
void adreno_profile_submit_time(struct adreno_submit_time *time);

void adreno_preemption_timer(struct timer_list *t);

/**
 * adreno_create_profile_buffer - Create a buffer to store profiling data
 * @adreno_dev: Adreno GPU device handle
 */
void adreno_create_profile_buffer(struct adreno_device *adreno_dev);
/**
 * adreno_isidle - Return true if the hardware is idle
 * @adreno_dev: Adreno GPU device handle
 *
 * Return: True if the hardware is idle
 */
bool adreno_isidle(struct adreno_device *adreno_dev);

/**
 * adreno_allocate_global - Helper function to allocate a global GPU object
 * @device: A GPU device handle
 * @memdesc: Pointer to a &struct kgsl_memdesc pointer
 * @size: Size of the allocation in bytes
 * @padding: Amount of extra padding to add to the VA allocation
 * @flags: Control flags for the allocation
 * @priv: Internal flags for the allocation
 * @name: Name of the allocation (for the debugfs file)
 *
 * Allocate a global object if it hasn't already been allocated and put it in
 * the pointer pointed to by @memdesc.
 *
 * Return: 0 on success or negative on error
 */
static inline int adreno_allocate_global(struct kgsl_device *device,
		struct kgsl_memdesc **memdesc, u64 size, u32 padding, u64 flags,
		u32 priv, const char *name)
{
	if (!IS_ERR_OR_NULL(*memdesc))
		return 0;

	*memdesc = kgsl_allocate_global(device, size, padding, flags, priv, name);
	return PTR_ERR_OR_ZERO(*memdesc);
}
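
/*
 * A minimal usage sketch (the memdesc field, size and flags here are
 * illustrative, not how any particular buffer is really created): because
 * the helper returns 0 when the memdesc already exists, it is safe to call
 * on every probe or resume path:
 *
 *	ret = adreno_allocate_global(device, &adreno_dev->profile_buffer,
 *			PAGE_SIZE, 0, 0, 0, "profile_buffer");
 */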
static inline void adreno_set_dispatch_ops(struct adreno_device *adreno_dev,
		const struct adreno_dispatch_ops *ops)
{
	adreno_dev->dispatch_ops = ops;
}

#ifdef CONFIG_QCOM_KGSL_FENCE_TRACE
/**
 * adreno_fence_trace_array_init - Initialize an always on trace array
 * @device: A GPU device handle
 *
 * Register an always-on trace array for fence timeout debugging
 */
void adreno_fence_trace_array_init(struct kgsl_device *device);
#else
static inline void adreno_fence_trace_array_init(struct kgsl_device *device) {}
#endif
/*
 * adreno_drawobj_set_constraint - Set a power constraint
 * @device: Pointer to a KGSL device structure
 * @drawobj: Draw object for which the constraint is to be set
 *
 * Set the power constraint if requested by this context
 */
void adreno_drawobj_set_constraint(struct kgsl_device *device,
		struct kgsl_drawobj *drawobj);

/**
 * adreno_get_gpu_model - Get the GPU model name from the device tree or chipid
 * @device: A GPU device handle
 *
 * Return: GPU model name string
 */
const char *adreno_get_gpu_model(struct kgsl_device *device);

int adreno_verify_cmdobj(struct kgsl_device_private *dev_priv,
		struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
		uint32_t count);
/**
 * adreno_mark_for_coldboot - Set a flag to cold boot the GPU on slumber exit
 * @adreno_dev: Adreno device handle
 */
void adreno_mark_for_coldboot(struct adreno_device *adreno_dev);

/**
 * adreno_smmu_is_stalled() - Check whether the SMMU is stalled or not
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: True if the SMMU is stalled, false otherwise
 */
bool adreno_smmu_is_stalled(struct adreno_device *adreno_dev);

/**
 * adreno_get_ahb_timeout_val() - Get the ahb_timeout value
 * @adreno_dev: Adreno device handle
 * @noc_timeout_us: GPU config NOC timeout value in usec
 *
 * Return: AHB timeout value to be programmed into the AHB CNTL registers
 */
u32 adreno_get_ahb_timeout_val(struct adreno_device *adreno_dev, u32 noc_timeout_us);
/**
 * adreno_llcc_slice_deactivate - Deactivate GPU and GPUHTW llcc slices
 * @adreno_dev: Adreno device handle
 */
static inline void adreno_llcc_slice_deactivate(struct adreno_device *adreno_dev)
{
	if (adreno_dev->gpu_llc_slice_enable && !IS_ERR_OR_NULL(adreno_dev->gpu_llc_slice))
		llcc_slice_deactivate(adreno_dev->gpu_llc_slice);

	if (adreno_dev->gpuhtw_llc_slice_enable && !IS_ERR_OR_NULL(adreno_dev->gpuhtw_llc_slice))
		llcc_slice_deactivate(adreno_dev->gpuhtw_llc_slice);
}

/**
 * adreno_irq_free - Free an interrupt allocated for GPU
 * @adreno_dev: Adreno device handle
 */
static inline void adreno_irq_free(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!(adreno_dev->irq_mask || device->pwrctrl.interrupt_num))
		return;

	devm_free_irq(&device->pdev->dev, device->pwrctrl.interrupt_num, device);
	adreno_dev->irq_mask = 0;
	device->pwrctrl.interrupt_num = 0;
}

#endif /* __ADRENO_H */