adreno_a5xx.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/clk/qcom.h>
  7. #include <linux/delay.h>
  8. #include <linux/firmware.h>
  9. #include <linux/of.h>
  10. #include <linux/of_device.h>
  11. #include <linux/qcom_scm.h>
  12. #include <linux/slab.h>
  13. #include "adreno.h"
  14. #include "adreno_a5xx.h"
  15. #include "adreno_a5xx_packets.h"
  16. #include "adreno_pm4types.h"
  17. #include "adreno_trace.h"
  18. #include "kgsl_trace.h"
  19. static int critical_packet_constructed;
  20. static unsigned int crit_pkts_dwords;
  21. static void a5xx_irq_storm_worker(struct work_struct *work);
  22. static int _read_fw2_block_header(struct kgsl_device *device,
  23. uint32_t *header, uint32_t remain,
  24. uint32_t id, uint32_t major, uint32_t minor);
  25. static void a5xx_gpmu_reset(struct work_struct *work);
  26. static int a5xx_gpmu_init(struct adreno_device *adreno_dev);
  27. /**
  28. * Number of times to check if the regulator is enabled before
  29. * giving up and returning failure.
  30. */
  31. #define PWR_RETRY 100
  32. /**
  33. * Number of times to check if the GPMU firmware is initialized before
  34. * giving up and returning failure.
  35. */
  36. #define GPMU_FW_INIT_RETRY 5000
  37. #define A530_QFPROM_RAW_PTE_ROW0_MSB 0x134
  38. #define A530_QFPROM_RAW_PTE_ROW2_MSB 0x144
  39. #define A5XX_INT_MASK \
  40. ((1 << A5XX_INT_RBBM_AHB_ERROR) | \
  41. (1 << A5XX_INT_RBBM_TRANSFER_TIMEOUT) | \
  42. (1 << A5XX_INT_RBBM_ME_MS_TIMEOUT) | \
  43. (1 << A5XX_INT_RBBM_PFP_MS_TIMEOUT) | \
  44. (1 << A5XX_INT_RBBM_ETS_MS_TIMEOUT) | \
  45. (1 << A5XX_INT_RBBM_ATB_ASYNC_OVERFLOW) | \
  46. (1 << A5XX_INT_RBBM_GPC_ERROR) | \
  47. (1 << A5XX_INT_CP_HW_ERROR) | \
  48. (1 << A5XX_INT_CP_CACHE_FLUSH_TS) | \
  49. (1 << A5XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
  50. (1 << A5XX_INT_MISC_HANG_DETECT) | \
  51. (1 << A5XX_INT_UCHE_OOB_ACCESS) | \
  52. (1 << A5XX_INT_UCHE_TRAP_INTR) | \
  53. (1 << A5XX_INT_CP_SW) | \
  54. (1 << A5XX_INT_GPMU_FIRMWARE) | \
  55. (1 << A5XX_INT_GPMU_VOLTAGE_DROOP))
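/*
 * a5xx_probe() - Bind an a5xx GPU to the adreno core
 * @pdev: Pointer to the platform device
 * @chipid: Chip ID of the target
 * @gpucore: Pointer to the GPU core description
 *
 * Initialize the adreno device with the target defaults (SPTP PC, HWCG,
 * LM, leakage), register offsets and the IRQ mask, then probe the common
 * adreno device, coresight and the dispatcher.
 */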
  56. static int a5xx_probe(struct platform_device *pdev,
  57. u32 chipid, const struct adreno_gpu_core *gpucore)
  58. {
  59. struct adreno_device *adreno_dev;
  60. struct kgsl_device *device;
  61. int ret;
  62. adreno_dev = (struct adreno_device *)
  63. of_device_get_match_data(&pdev->dev);
  64. memset(adreno_dev, 0, sizeof(*adreno_dev));
  65. adreno_dev->gpucore = gpucore;
  66. adreno_dev->chipid = chipid;
  67. adreno_reg_offset_init(gpucore->gpudev->reg_offsets);
  68. adreno_dev->sptp_pc_enabled =
  69. ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC);
  70. if (adreno_is_a540(adreno_dev))
  71. adreno_dev->throttling_enabled = true;
  72. adreno_dev->hwcg_enabled = true;
  73. adreno_dev->lm_enabled =
  74. ADRENO_FEATURE(adreno_dev, ADRENO_LM);
  75. /* Setup defaults that might get changed by the fuse bits */
  76. adreno_dev->lm_leakage = 0x4e001a;
  77. device = KGSL_DEVICE(adreno_dev);
  78. timer_setup(&device->idle_timer, kgsl_timer, 0);
  79. INIT_WORK(&device->idle_check_ws, kgsl_idle_check);
  80. adreno_dev->irq_mask = A5XX_INT_MASK;
  81. ret = adreno_device_probe(pdev, adreno_dev);
  82. if (ret)
  83. return ret;
  84. a5xx_coresight_init(adreno_dev);
  85. return adreno_dispatcher_init(adreno_dev);
  86. }
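/*
 * _do_fixup() - Patch GPU addresses into a critical packet buffer
 * @fixups: Table of lo/hi dword offsets to patch
 * @count: Number of entries in @fixups
 * @gpuaddrs: GPU addresses of the referenced buffers
 * @buffer: Host pointer to the command buffer being patched
 */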
  87. static void _do_fixup(const struct adreno_critical_fixup *fixups, int count,
  88. uint64_t *gpuaddrs, unsigned int *buffer)
  89. {
  90. int i;
  91. for (i = 0; i < count; i++) {
  92. buffer[fixups[i].lo_offset] =
  93. lower_32_bits(gpuaddrs[fixups[i].buffer]) |
  94. fixups[i].mem_offset;
  95. buffer[fixups[i].hi_offset] =
  96. upper_32_bits(gpuaddrs[fixups[i].buffer]);
  97. }
  98. }
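/*
 * a5xx_critical_packet_construct() - Build the critical packet buffers
 * @adreno_dev: The adreno device pointer
 *
 * Allocate the global (and secure) buffers for the critical packets,
 * copy in the canned packet streams and patch in the runtime GPU
 * addresses. Returns 0 on success or a negative error code on failure.
 */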
  99. static int a5xx_critical_packet_construct(struct adreno_device *adreno_dev)
  100. {
  101. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  102. unsigned int *cmds;
  103. uint64_t gpuaddrs[4];
  104. adreno_dev->critpkts = kgsl_allocate_global(device,
  105. PAGE_SIZE * 4, 0, 0, 0, "crit_pkts");
  106. if (IS_ERR(adreno_dev->critpkts))
  107. return PTR_ERR(adreno_dev->critpkts);
  108. adreno_dev->critpkts_secure = kgsl_allocate_global(device,
  109. PAGE_SIZE, 0, KGSL_MEMFLAGS_SECURE, 0, "crit_pkts_secure");
  110. if (IS_ERR(adreno_dev->critpkts_secure))
  111. return PTR_ERR(adreno_dev->critpkts_secure);
  112. cmds = adreno_dev->critpkts->hostptr;
  113. gpuaddrs[0] = adreno_dev->critpkts_secure->gpuaddr;
  114. gpuaddrs[1] = adreno_dev->critpkts->gpuaddr + PAGE_SIZE;
  115. gpuaddrs[2] = adreno_dev->critpkts->gpuaddr + (PAGE_SIZE * 2);
  116. gpuaddrs[3] = adreno_dev->critpkts->gpuaddr + (PAGE_SIZE * 3);
  117. crit_pkts_dwords = ARRAY_SIZE(_a5xx_critical_pkts);
  118. memcpy(cmds, _a5xx_critical_pkts, crit_pkts_dwords << 2);
  119. _do_fixup(critical_pkt_fixups, ARRAY_SIZE(critical_pkt_fixups),
  120. gpuaddrs, cmds);
  121. cmds = adreno_dev->critpkts->hostptr + PAGE_SIZE;
  122. memcpy(cmds, _a5xx_critical_pkts_mem01,
  123. ARRAY_SIZE(_a5xx_critical_pkts_mem01) << 2);
  124. cmds = adreno_dev->critpkts->hostptr + (PAGE_SIZE * 2);
  125. memcpy(cmds, _a5xx_critical_pkts_mem02,
  126. ARRAY_SIZE(_a5xx_critical_pkts_mem02) << 2);
  127. cmds = adreno_dev->critpkts->hostptr + (PAGE_SIZE * 3);
  128. memcpy(cmds, _a5xx_critical_pkts_mem03,
  129. ARRAY_SIZE(_a5xx_critical_pkts_mem03) << 2);
  130. _do_fixup(critical_pkt_mem03_fixups,
  131. ARRAY_SIZE(critical_pkt_mem03_fixups), gpuaddrs, cmds);
  132. critical_packet_constructed = 1;
  133. return 0;
  134. }
  135. static int a5xx_microcode_read(struct adreno_device *adreno_dev);
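/*
 * a5xx_init() - One time initialization for the a5xx target
 * @adreno_dev: The adreno device pointer
 *
 * Set up the ringbuffers, read the CP microcode, initialize the GPMU and
 * IRQ storm workers, construct the critical packets if the quirk is set,
 * and set up the profiling buffer and crash dumper.
 */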
  136. static int a5xx_init(struct adreno_device *adreno_dev)
  137. {
  138. const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
  139. int ret;
  140. ret = a5xx_ringbuffer_init(adreno_dev);
  141. if (ret)
  142. return ret;
  143. ret = a5xx_microcode_read(adreno_dev);
  144. if (ret)
  145. return ret;
  146. if (a5xx_has_gpmu(adreno_dev))
  147. INIT_WORK(&adreno_dev->gpmu_work, a5xx_gpmu_reset);
  148. adreno_dev->highest_bank_bit = a5xx_core->highest_bank_bit;
  149. INIT_WORK(&adreno_dev->irq_storm_work, a5xx_irq_storm_worker);
  150. if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS))
  151. a5xx_critical_packet_construct(adreno_dev);
  152. adreno_create_profile_buffer(adreno_dev);
  153. a5xx_crashdump_init(adreno_dev);
  154. return 0;
  155. }
  156. static const struct {
  157. u32 reg;
  158. u32 base;
  159. u32 count;
  160. } a5xx_protected_blocks[] = {
  161. /* RBBM */
  162. { A5XX_CP_PROTECT_REG_0, 0x004, 2 },
  163. { A5XX_CP_PROTECT_REG_0 + 1, 0x008, 3 },
  164. { A5XX_CP_PROTECT_REG_0 + 2, 0x010, 4 },
  165. { A5XX_CP_PROTECT_REG_0 + 3, 0x020, 5 },
  166. { A5XX_CP_PROTECT_REG_0 + 4, 0x040, 6 },
  167. { A5XX_CP_PROTECT_REG_0 + 5, 0x080, 6 },
  168. /* Content protection */
  169. { A5XX_CP_PROTECT_REG_0 + 6, A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 4 },
  170. { A5XX_CP_PROTECT_REG_0 + 7, A5XX_RBBM_SECVID_TRUST_CNTL, 1 },
  171. /* CP */
  172. { A5XX_CP_PROTECT_REG_0 + 8, 0x800, 6 },
  173. { A5XX_CP_PROTECT_REG_0 + 9, 0x840, 3 },
  174. { A5XX_CP_PROTECT_REG_0 + 10, 0x880, 5 },
  175. { A5XX_CP_PROTECT_REG_0 + 11, 0xaa0, 0 },
  176. /* RB */
  177. { A5XX_CP_PROTECT_REG_0 + 12, 0xcc0, 0 },
  178. { A5XX_CP_PROTECT_REG_0 + 13, 0xcf0, 1 },
  179. /* VPC */
  180. { A5XX_CP_PROTECT_REG_0 + 14, 0xe68, 3 },
  181. { A5XX_CP_PROTECT_REG_0 + 15, 0xe70, 4 },
  182. /* UCHE */
  183. { A5XX_CP_PROTECT_REG_0 + 16, 0xe80, 4 },
  184. /* A5XX_CP_PROTECT_REG_17 will be used for SMMU */
  185. /* A5XX_CP_PROTECT_REG_18 - A5XX_CP_PROTECT_REG_31 are available */
  186. };
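/*
 * _setprotectreg() - Program a single CP protect register
 * @device: Pointer to the KGSL device
 * @offset: Offset of the A5XX_CP_PROTECT_REG_n register to write
 * @base: Base dword offset of the protected range
 * @count: log2 of the number of dwords in the range
 */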
  187. static void _setprotectreg(struct kgsl_device *device, u32 offset,
  188. u32 base, u32 count)
  189. {
  190. kgsl_regwrite(device, offset, 0x60000000 | (count << 24) | (base << 2));
  191. }
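/*
 * a5xx_protect_init() - Set up the CP register protection ranges
 * @adreno_dev: The adreno device pointer
 *
 * Enable CP protection and program the protected register blocks from
 * a5xx_protected_blocks, then use the next available slot for the SMMU
 * register range.
 */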
  192. static void a5xx_protect_init(struct adreno_device *adreno_dev)
  193. {
  194. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  195. u32 reg;
  196. int i;
  197. /* enable access protection to privileged registers */
  198. kgsl_regwrite(device, A5XX_CP_PROTECT_CNTL, 0x00000007);
  199. for (i = 0; i < ARRAY_SIZE(a5xx_protected_blocks); i++) {
  200. reg = a5xx_protected_blocks[i].reg;
  201. _setprotectreg(device, reg, a5xx_protected_blocks[i].base,
  202. a5xx_protected_blocks[i].count);
  203. }
  204. /*
  205. * For a530 and a540 the SMMU region is 0x20000 bytes long; it is 0x10000
  206. * bytes on all other targets. The base offset for both is 0x40000.
  207. * Write it to the next available slot
  208. */
  209. if (adreno_is_a530(adreno_dev) || adreno_is_a540(adreno_dev))
  210. _setprotectreg(device, reg + 1, 0x40000, ilog2(0x20000));
  211. else
  212. _setprotectreg(device, reg + 1, 0x40000, ilog2(0x10000));
  213. }
  214. /*
  215. * _poll_gdsc_status() - Poll the GDSC status register
  216. * @adreno_dev: The adreno device pointer
  217. * @status_reg: Offset of the status register
  218. * @status_value: The expected bit value
  219. *
  220. * Poll the status register till the power-on bit is equal to the
  221. * expected value or the max retries are exceeded.
  222. */
  223. static int _poll_gdsc_status(struct adreno_device *adreno_dev,
  224. unsigned int status_reg,
  225. unsigned int status_value)
  226. {
  227. unsigned int reg, retry = PWR_RETRY;
  228. /* Bit 20 is the power-on bit of the SPTP and RAC GDSC status registers */
  229. do {
  230. udelay(1);
  231. kgsl_regread(KGSL_DEVICE(adreno_dev), status_reg, &reg);
  232. } while (((reg & BIT(20)) != (status_value << 20)) && retry--);
  233. if ((reg & BIT(20)) != (status_value << 20))
  234. return -ETIMEDOUT;
  235. return 0;
  236. }
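/*
 * a5xx_restore_isense_regs() - Restore the ISENSE registers on a540
 * @adreno_dev: The adreno device pointer
 *
 * If the GPMU saved the ISENSE register values (signalled by the
 * 0xBABEFACE signature at GPMU_ISENSE_SAVE), cache them on the first
 * pass and write them back to the hardware on every restart.
 */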
  237. static void a5xx_restore_isense_regs(struct adreno_device *adreno_dev)
  238. {
  239. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  240. unsigned int reg, i, ramp = GPMU_ISENSE_SAVE;
  241. static unsigned int isense_regs[6] = {0xFFFF}, isense_reg_addr[] = {
  242. A5XX_GPU_CS_DECIMAL_ALIGN,
  243. A5XX_GPU_CS_SENSOR_PARAM_CORE_1,
  244. A5XX_GPU_CS_SENSOR_PARAM_CORE_2,
  245. A5XX_GPU_CS_SW_OV_FUSE_EN,
  246. A5XX_GPU_CS_ENDPOINT_CALIBRATION_DONE,
  247. A5XX_GPMU_TEMP_SENSOR_CONFIG};
  248. if (!adreno_is_a540(adreno_dev))
  249. return;
  250. /* read signature */
  251. kgsl_regread(device, ramp++, &reg);
  252. if (reg == 0xBABEFACE) {
  253. /* store memory locations in buffer */
  254. for (i = 0; i < ARRAY_SIZE(isense_regs); i++)
  255. kgsl_regread(device, ramp + i, isense_regs + i);
  256. /* clear signature */
  257. kgsl_regwrite(device, GPMU_ISENSE_SAVE, 0x0);
  258. }
  259. /* if we never stored memory locations - do nothing */
  260. if (isense_regs[0] == 0xFFFF)
  261. return;
  262. /* restore registers from memory */
  263. for (i = 0; i < ARRAY_SIZE(isense_reg_addr); i++)
  264. kgsl_regwrite(device, isense_reg_addr[i], isense_regs[i]);
  265. }
  266. /*
  267. * a5xx_regulator_enable() - Enable any necessary HW regulators
  268. * @adreno_dev: The adreno device pointer
  269. *
  270. * Some HW blocks may need their regulators explicitly enabled
  271. * on a restart. Clocks must be on during this call.
  272. */
  273. static int a5xx_regulator_enable(struct adreno_device *adreno_dev)
  274. {
  275. int ret;
  276. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  277. if (test_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
  278. &adreno_dev->priv))
  279. return 0;
  280. if (!(adreno_is_a530(adreno_dev) || adreno_is_a540(adreno_dev))) {
  281. /* Halt the sp_input_clk at HM level */
  282. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL, 0x00000055);
  283. a5xx_hwcg_set(adreno_dev, true);
  284. /* Turn on sp_input_clk at HM level */
  285. kgsl_regrmw(device, A5XX_RBBM_CLOCK_CNTL, 0xFF, 0);
  286. set_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
  287. &adreno_dev->priv);
  288. return 0;
  289. }
  290. /*
  291. * Turn on smaller power domain first to reduce voltage droop.
  292. * Set the default register values; set SW_COLLAPSE to 0.
  293. */
  294. kgsl_regwrite(device, A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
  295. /* Insert a delay between RAC and SPTP GDSC to reduce voltage droop */
  296. udelay(3);
  297. ret = _poll_gdsc_status(adreno_dev, A5XX_GPMU_RBCCU_PWR_CLK_STATUS, 1);
  298. if (ret) {
  299. dev_err(device->dev, "RBCCU GDSC enable failed\n");
  300. return ret;
  301. }
  302. kgsl_regwrite(device, A5XX_GPMU_SP_POWER_CNTL, 0x778000);
  303. ret = _poll_gdsc_status(adreno_dev, A5XX_GPMU_SP_PWR_CLK_STATUS, 1);
  304. if (ret) {
  305. dev_err(device->dev, "SPTP GDSC enable failed\n");
  306. return ret;
  307. }
  308. /* Disable SP clock */
  309. kgsl_regrmw(device, A5XX_GPMU_GPMU_SP_CLOCK_CONTROL,
  310. CNTL_IP_CLK_ENABLE, 0);
  311. /* Enable hardware clockgating */
  312. a5xx_hwcg_set(adreno_dev, true);
  313. /* Enable SP clock */
  314. kgsl_regrmw(device, A5XX_GPMU_GPMU_SP_CLOCK_CONTROL,
  315. CNTL_IP_CLK_ENABLE, 1);
  316. a5xx_restore_isense_regs(adreno_dev);
  317. set_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED, &adreno_dev->priv);
  318. return 0;
  319. }
  320. /*
  321. * a5xx_regulator_disable() - Disable any necessary HW regulators
  322. * @adreno_dev: The adreno device pointer
  323. *
  324. * Some HW blocks may need their regulators explicitly disabled
  325. * on a power down to prevent current spikes. Clocks must be on
  326. * during this call.
  327. */
  328. static void a5xx_regulator_disable(struct adreno_device *adreno_dev)
  329. {
  330. unsigned int reg;
  331. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  332. if (adreno_is_a512(adreno_dev) || adreno_is_a508(adreno_dev))
  333. return;
  334. if (!test_and_clear_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
  335. &adreno_dev->priv))
  336. return;
  337. /* If feature is not supported or not enabled */
  338. if (!adreno_dev->sptp_pc_enabled) {
  339. /* Set the default register values; set SW_COLLAPSE to 1 */
  340. kgsl_regwrite(device, A5XX_GPMU_SP_POWER_CNTL, 0x778001);
  341. /*
  342. * Insert a delay between SPTP and RAC GDSC to reduce voltage
  343. * droop.
  344. */
  345. udelay(3);
  346. if (_poll_gdsc_status(adreno_dev,
  347. A5XX_GPMU_SP_PWR_CLK_STATUS, 0))
  348. dev_warn(device->dev, "SPTP GDSC disable failed\n");
  349. kgsl_regwrite(device, A5XX_GPMU_RBCCU_POWER_CNTL, 0x778001);
  350. if (_poll_gdsc_status(adreno_dev,
  351. A5XX_GPMU_RBCCU_PWR_CLK_STATUS, 0))
  352. dev_warn(device->dev, "RBCCU GDSC disable failed\n");
  353. } else if (test_bit(ADRENO_DEVICE_GPMU_INITIALIZED,
  354. &adreno_dev->priv)) {
  355. /* GPMU firmware is supposed to turn off SPTP & RAC GDSCs. */
  356. kgsl_regread(device, A5XX_GPMU_SP_PWR_CLK_STATUS, &reg);
  357. if (reg & BIT(20))
  358. dev_warn(device->dev, "SPTP GDSC is not disabled\n");
  359. kgsl_regread(device, A5XX_GPMU_RBCCU_PWR_CLK_STATUS, &reg);
  360. if (reg & BIT(20))
  361. dev_warn(device->dev, "RBCCU GDSC is not disabled\n");
  362. /*
  363. * GPMU firmware is supposed to set GMEM to non-retention.
  364. * Bit 14 is the memory core force on bit.
  365. */
  366. kgsl_regread(device, A5XX_GPMU_RBCCU_CLOCK_CNTL, &reg);
  367. if (reg & BIT(14))
  368. dev_warn(device->dev, "GMEM is forced on\n");
  369. }
  370. if (adreno_is_a530(adreno_dev)) {
  371. /* Reset VBIF before PC to avoid popping bogus FIFO entries */
  372. kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD,
  373. 0x003C0000);
  374. kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD, 0);
  375. }
  376. }
  377. /*
  378. * a5xx_enable_pc() - Enable the GPMU based power collapse of the SPTP and RAC
  379. * blocks
  380. * @adreno_dev: The adreno device pointer
  381. */
  382. static void a5xx_enable_pc(struct adreno_device *adreno_dev)
  383. {
  384. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  385. if (!adreno_dev->sptp_pc_enabled)
  386. return;
  387. kgsl_regwrite(device, A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x0000007F);
  388. kgsl_regwrite(device, A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
  389. kgsl_regwrite(device, A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0x000A0080);
  390. kgsl_regwrite(device, A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x00600040);
  391. trace_adreno_sp_tp((unsigned long) __builtin_return_address(0));
  392. };
  393. /*
  394. * The maximum payload of a type4 packet is the max size minus one for the
  395. * opcode
  396. */
  397. #define TYPE4_MAX_PAYLOAD (PM4_TYPE4_PKT_SIZE_MAX - 1)
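/*
 * _gpmu_create_load_cmds() - Build the PM4 stream that loads the GPMU ucode
 * @adreno_dev: The adreno device pointer
 * @ucode: Pointer to the GPMU firmware payload
 * @size: Size of the payload in dwords
 *
 * Prebuild a command stream of type4 packets that writes the firmware
 * into A5XX_GPMU_INST_RAM, wrapped in protected mode off/on packets.
 * The stream is cached in adreno_dev->gpmu_cmds for later submission.
 */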
  398. static int _gpmu_create_load_cmds(struct adreno_device *adreno_dev,
  399. uint32_t *ucode, uint32_t size)
  400. {
  401. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  402. uint32_t *start, *cmds;
  403. uint32_t offset = 0;
  404. uint32_t cmds_size = size;
  405. /* Add a dword for each PM4 packet */
  406. cmds_size += (size / TYPE4_MAX_PAYLOAD) + 1;
  407. /* Add 4 dwords for the protected mode */
  408. cmds_size += 4;
  409. if (adreno_dev->gpmu_cmds != NULL)
  410. return 0;
  411. adreno_dev->gpmu_cmds = devm_kmalloc(&device->pdev->dev,
  412. cmds_size << 2, GFP_KERNEL);
  413. if (adreno_dev->gpmu_cmds == NULL)
  414. return -ENOMEM;
  415. cmds = adreno_dev->gpmu_cmds;
  416. start = cmds;
  417. /* Turn CP protection OFF */
  418. cmds += cp_protected_mode(adreno_dev, cmds, 0);
  419. /*
  420. * Prebuild the cmd stream to send to the GPU to load
  421. * the GPMU firmware
  422. */
  423. while (size > 0) {
  424. int tmp_size = size;
  425. if (size >= TYPE4_MAX_PAYLOAD)
  426. tmp_size = TYPE4_MAX_PAYLOAD;
  427. *cmds++ = cp_type4_packet(
  428. A5XX_GPMU_INST_RAM_BASE + offset,
  429. tmp_size);
  430. memcpy(cmds, &ucode[offset], tmp_size << 2);
  431. cmds += tmp_size;
  432. offset += tmp_size;
  433. size -= tmp_size;
  434. }
  435. /* Turn CP protection ON */
  436. cmds += cp_protected_mode(adreno_dev, cmds, 1);
  437. adreno_dev->gpmu_cmds_size = (size_t) (cmds - start);
  438. return 0;
  439. }
  440. /*
  441. * _load_gpmu_firmware() - Load the ucode into the GPMU RAM
  442. * @adreno_dev: Pointer to adreno device
  443. */
  444. static int _load_gpmu_firmware(struct adreno_device *adreno_dev)
  445. {
  446. uint32_t *data;
  447. const struct firmware *fw = NULL;
  448. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  449. const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
  450. uint32_t *cmds, cmd_size;
  451. int ret = -EINVAL;
  452. u32 gmu_major = 1;
  453. if (!a5xx_has_gpmu(adreno_dev))
  454. return 0;
  455. /* a530 used GPMU major 1 and a540 used GPMU major 3 */
  456. if (adreno_is_a540(adreno_dev))
  457. gmu_major = 3;
  458. /* gpmu fw already saved and verified so do nothing new */
  459. if (adreno_dev->gpmu_cmds_size != 0)
  460. return 0;
  461. if (a5xx_core->gpmufw_name == NULL)
  462. return 0;
  463. ret = request_firmware(&fw, a5xx_core->gpmufw_name, &device->pdev->dev);
  464. if (ret || fw == NULL) {
  465. dev_err(&device->pdev->dev,
  466. "request_firmware (%s) failed: %d\n",
  467. a5xx_core->gpmufw_name, ret);
  468. return ret;
  469. }
  470. data = (uint32_t *)fw->data;
  471. if (data[0] >= (fw->size / sizeof(uint32_t)) || data[0] < 2)
  472. goto err;
  473. if (data[1] != GPMU_FIRMWARE_ID)
  474. goto err;
  475. ret = _read_fw2_block_header(device, &data[2],
  476. data[0] - 2, GPMU_FIRMWARE_ID, gmu_major, 0);
  477. if (ret)
  478. goto err;
  479. /* Integer overflow check for cmd_size */
  480. if (data[2] > (data[0] - 2))
  481. goto err;
  482. cmds = data + data[2] + 3;
  483. cmd_size = data[0] - data[2] - 2;
  484. if (cmd_size > GPMU_INST_RAM_SIZE) {
  485. dev_err(device->dev,
  486. "GPMU firmware block size is larger than RAM size\n");
  487. goto err;
  488. }
  489. /* Everything is cool, so create some commands */
  490. ret = _gpmu_create_load_cmds(adreno_dev, cmds, cmd_size);
  491. err:
  492. if (fw)
  493. release_firmware(fw);
  494. return ret;
  495. }
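/*
 * a5xx_spin_idle_debug() - Dump state after a spin-idle timeout
 * @adreno_dev: The adreno device pointer
 * @str: Message to print before the register dump
 *
 * Print the ringbuffer pointers, RBBM status and CP hardware fault
 * registers, then take a device snapshot.
 */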
  496. static void a5xx_spin_idle_debug(struct adreno_device *adreno_dev,
  497. const char *str)
  498. {
  499. struct kgsl_device *device = &adreno_dev->dev;
  500. unsigned int rptr, wptr;
  501. unsigned int status, status3, intstatus;
  502. unsigned int hwfault;
  503. dev_err(device->dev, "%s", str);
  504. kgsl_regread(device, A5XX_CP_RB_RPTR, &rptr);
  505. kgsl_regread(device, A5XX_CP_RB_WPTR, &wptr);
  506. kgsl_regread(device, A5XX_RBBM_STATUS, &status);
  507. kgsl_regread(device, A5XX_RBBM_STATUS3, &status3);
  508. kgsl_regread(device, A5XX_RBBM_INT_0_STATUS, &intstatus);
  509. kgsl_regread(device, A5XX_CP_HW_FAULT, &hwfault);
  510. dev_err(device->dev,
  511. "rb=%d pos=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
  512. adreno_dev->cur_rb->id, rptr, wptr, status, status3, intstatus);
  513. dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
  514. kgsl_device_snapshot(device, NULL, NULL, false);
  515. }
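/*
 * _gpmu_send_init_cmds() - Submit the prebuilt GPMU load commands
 * @adreno_dev: The adreno device pointer
 *
 * Copy the cached GPMU command stream into the current ringbuffer,
 * submit it and spin until the GPU is idle.
 */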
  516. static int _gpmu_send_init_cmds(struct adreno_device *adreno_dev)
  517. {
  518. struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
  519. uint32_t *cmds;
  520. uint32_t size = adreno_dev->gpmu_cmds_size;
  521. int ret;
  522. if (size == 0 || adreno_dev->gpmu_cmds == NULL)
  523. return -EINVAL;
  524. cmds = adreno_ringbuffer_allocspace(rb, size);
  525. if (IS_ERR(cmds))
  526. return PTR_ERR(cmds);
  527. if (cmds == NULL)
  528. return -ENOSPC;
  529. /* Copy to the RB the predefined fw sequence cmds */
  530. memcpy(cmds, adreno_dev->gpmu_cmds, size << 2);
  531. ret = a5xx_ringbuffer_submit(rb, NULL, true);
  532. if (!ret) {
  533. ret = adreno_spin_idle(adreno_dev, 2000);
  534. if (ret)
  535. a5xx_spin_idle_debug(adreno_dev,
  536. "gpmu initialization failed to idle\n");
  537. }
  538. return ret;
  539. }
  540. /*
  541. * a5xx_gpmu_start() - Initialize and start the GPMU
  542. * @adreno_dev: Pointer to adreno device
  543. *
  544. * Load the GPMU microcode, set up any features such as hardware clock gating
  545. * or IFPC, and take the GPMU out of reset.
  546. */
  547. static int a5xx_gpmu_start(struct adreno_device *adreno_dev)
  548. {
  549. int ret;
  550. unsigned int reg, retry = GPMU_FW_INIT_RETRY;
  551. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  552. if (!a5xx_has_gpmu(adreno_dev))
  553. return 0;
  554. ret = _gpmu_send_init_cmds(adreno_dev);
  555. if (ret)
  556. return ret;
  557. if (adreno_is_a530(adreno_dev)) {
  558. /* GPMU clock gating setup */
  559. kgsl_regwrite(device, A5XX_GPMU_WFI_CONFIG, 0x00004014);
  560. }
  561. /* Kick off GPMU firmware */
  562. kgsl_regwrite(device, A5XX_GPMU_CM3_SYSRESET, 0);
  563. /*
  564. * The hardware team's estimation of GPMU firmware initialization
  565. * latency is about 3000 cycles, which is roughly 5 to 24 usec.
  566. */
  567. do {
  568. udelay(1);
  569. kgsl_regread(device, A5XX_GPMU_GENERAL_0, &reg);
  570. } while ((reg != 0xBABEFACE) && retry--);
  571. if (reg != 0xBABEFACE) {
  572. dev_err(device->dev,
  573. "GPMU firmware initialization timed out\n");
  574. return -ETIMEDOUT;
  575. }
  576. if (!adreno_is_a530(adreno_dev)) {
  577. kgsl_regread(device, A5XX_GPMU_GENERAL_1, &reg);
  578. if (reg) {
  579. dev_err(device->dev,
  580. "GPMU firmware initialization failed: %d\n",
  581. reg);
  582. return -EIO;
  583. }
  584. }
  585. set_bit(ADRENO_DEVICE_GPMU_INITIALIZED, &adreno_dev->priv);
  586. /*
  587. * We are in the AWARE state and the IRQ line from the GPU to the host
  588. * is disabled.
  589. * Read pending GPMU interrupts and clear GPMU_RBBM_INTR_INFO.
  590. */
  591. kgsl_regread(device, A5XX_GPMU_RBBM_INTR_INFO, &reg);
  592. /*
  593. * Clear the RBBM interrupt mask if any GPMU interrupts
  594. * are pending.
  595. */
  596. if (reg)
  597. kgsl_regwrite(device,
  598. A5XX_RBBM_INT_CLEAR_CMD,
  599. 1 << A5XX_INT_GPMU_FIRMWARE);
  600. return ret;
  601. }
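/*
 * a5xx_hwcg_set() - Enable or disable hardware clock gating
 * @adreno_dev: The adreno device pointer
 * @on: True to program the per-block HWCG values, false to clear them
 */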
  602. void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
  603. {
  604. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  605. const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
  606. int i;
  607. if (!adreno_dev->hwcg_enabled)
  608. return;
  609. for (i = 0; i < a5xx_core->hwcg_count; i++)
  610. kgsl_regwrite(device, a5xx_core->hwcg[i].offset,
  611. on ? a5xx_core->hwcg[i].val : 0);
  612. /* enable top level HWCG */
  613. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL, on ? 0xAAA8AA00 : 0);
  614. kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
  615. }
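/*
 * _read_fw2_block_header() - Validate a GPMU/LM firmware block header
 * @device: Pointer to the KGSL device
 * @header: Pointer to the block header (first dword is its size)
 * @remain: Number of dwords remaining in the block
 * @id: Expected block ID (firmware or sequence)
 * @major: Expected major version
 * @minor: Expected minor version
 *
 * Returns 0 if the header is well formed and the versions are
 * compatible, otherwise a negative error code.
 */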
  616. static int _read_fw2_block_header(struct kgsl_device *device,
  617. uint32_t *header, uint32_t remain,
  618. uint32_t id, uint32_t major, uint32_t minor)
  619. {
  620. uint32_t header_size;
  621. int i = 1;
  622. if (header == NULL)
  623. return -ENOMEM;
  624. header_size = header[0];
  625. /* Headers have limited size and always occur as pairs of words */
  626. if (header_size > MAX_HEADER_SIZE || header_size >= remain ||
  627. header_size % 2 || header_size == 0)
  628. return -EINVAL;
  629. /* Sequences must have an identifying id first thing in their header */
  630. if (id == GPMU_SEQUENCE_ID) {
  631. if (header[i] != HEADER_SEQUENCE ||
  632. (header[i + 1] >= MAX_SEQUENCE_ID))
  633. return -EINVAL;
  634. i += 2;
  635. }
  636. for (; i < header_size; i += 2) {
  637. switch (header[i]) {
  638. /* Major Version */
  639. case HEADER_MAJOR:
  640. if ((major > header[i + 1]) &&
  641. header[i + 1]) {
  642. dev_err(device->dev,
  643. "GPMU major version mis-match %d, %d\n",
  644. major, header[i + 1]);
  645. return -EINVAL;
  646. }
  647. break;
  648. case HEADER_MINOR:
  649. if (minor > header[i + 1])
  650. dev_err(device->dev,
  651. "GPMU minor version mis-match %d %d\n",
  652. minor, header[i + 1]);
  653. break;
  654. case HEADER_DATE:
  655. case HEADER_TIME:
  656. break;
  657. default:
  658. dev_err(device->dev, "GPMU unknown header ID %d\n",
  659. header[i]);
  660. }
  661. }
  662. return 0;
  663. }
  664. /*
  665. * Read in the register sequence file and save pointers to the
  666. * necessary sequences.
  667. *
  668. * GPU sequence file format (one dword per field unless noted):
  669. * Block 1 length (length dword field not inclusive)
  670. * Block 1 type = Sequence = 3
  671. * Block Header length (length dword field not inclusive)
  672. * BH field ID = Sequence field ID
  673. * BH field data = Sequence ID
  674. * BH field ID
  675. * BH field data
  676. * ...
  677. * Opcode 0 ID
  678. * Opcode 0 data M words
  679. * Opcode 1 ID
  680. * Opcode 1 data N words
  681. * ...
  682. * Opcode X ID
  683. * Opcode X data O words
  684. * Block 2 length...
  685. */
  686. static void _load_regfile(struct adreno_device *adreno_dev)
  687. {
  688. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  689. const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
  690. const struct firmware *fw;
  691. uint64_t block_size = 0, block_total = 0;
  692. uint32_t fw_size, *block;
  693. int ret = -EINVAL;
  694. u32 lm_major = 1;
  695. if (!a5xx_core->regfw_name)
  696. return;
  697. ret = request_firmware(&fw, a5xx_core->regfw_name, &device->pdev->dev);
  698. if (ret) {
  699. dev_err(&device->pdev->dev, "request firmware failed %d, %s\n",
  700. ret, a5xx_core->regfw_name);
  701. return;
  702. }
  703. /* a530v2 lm_major was 3. a530v3 lm_major was 1 */
  704. if (adreno_is_a530v2(adreno_dev))
  705. lm_major = 3;
  706. fw_size = fw->size / sizeof(uint32_t);
  707. /* Minimum valid file is 6 dwords; see the file description above */
  708. if (fw_size < 6)
  709. goto err;
  710. block = (uint32_t *)fw->data;
  711. /* All offset numbers calculated from file description */
  712. while (block_total < fw_size) {
  713. block_size = block[0];
  714. if (((block_total + block_size) >= fw_size)
  715. || block_size < 5)
  716. goto err;
  717. if (block[1] != GPMU_SEQUENCE_ID)
  718. goto err;
  719. /* For now ignore blocks other than the LM sequence */
  720. if (block[4] == LM_SEQUENCE_ID) {
  721. ret = _read_fw2_block_header(device, &block[2],
  722. block_size - 2, GPMU_SEQUENCE_ID,
  723. lm_major, 0);
  724. if (ret)
  725. goto err;
  726. if (block[2] > (block_size - 2))
  727. goto err;
  728. adreno_dev->lm_sequence = block + block[2] + 3;
  729. adreno_dev->lm_size = block_size - block[2] - 2;
  730. }
  731. block_total += (block_size + 1);
  732. block += (block_size + 1);
  733. }
  734. if (adreno_dev->lm_sequence)
  735. return;
  736. err:
  737. release_firmware(fw);
  738. dev_err(device->dev,
  739. "Register file failed to load sz=%d bsz=%llu header=%d\n",
  740. fw_size, block_size, ret);
  741. }
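/*
 * _execute_reg_sequence() - Run an LM register sequence
 * @adreno_dev: The adreno device pointer
 * @opcode: Pointer to the sequence opcodes
 * @length: Length of the sequence in dwords
 *
 * Supported opcodes: 1 = write a 32 bit value to a 64 bit register,
 * 2 = write a 64 bit value to a 64 bit register, 3 = delay for N usec.
 * Returns -EINVAL on a malformed sequence.
 */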
  742. static int _execute_reg_sequence(struct adreno_device *adreno_dev,
  743. uint32_t *opcode, uint32_t length)
  744. {
  745. uint32_t *cur = opcode;
  746. uint64_t reg, val;
  747. /* todo double check the reg writes */
  748. while ((cur - opcode) < length) {
  749. if (cur[0] == 1 && (length - (cur - opcode) >= 4)) {
  750. /* Write a 32 bit value to a 64 bit reg */
  751. reg = cur[2];
  752. reg = (reg << 32) | cur[1];
  753. kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, cur[3]);
  754. cur += 4;
  755. } else if (cur[0] == 2 && (length - (cur - opcode) >= 5)) {
  756. /* Write a 64 bit value to a 64 bit reg */
  757. reg = cur[2];
  758. reg = (reg << 32) | cur[1];
  759. val = cur[4];
  760. val = (val << 32) | cur[3];
  761. kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, val);
  762. cur += 5;
  763. } else if (cur[0] == 3 && (length - (cur - opcode) >= 2)) {
  764. /* Delay for X usec */
  765. udelay(cur[1]);
  766. cur += 2;
  767. } else
  768. return -EINVAL;
  769. }
  770. return 0;
  771. }
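/*
 * _write_voltage_table() - Write the power level table to the GPMU
 * @adreno_dev: The adreno device pointer
 * @addr: Register address of the AGC message payload
 *
 * Write the max power, the number of power levels, and a voltage (mV) /
 * frequency (MHz) pair for each level. Returns the number of dwords
 * written.
 */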
  772. static uint32_t _write_voltage_table(struct adreno_device *adreno_dev,
  773. unsigned int addr)
  774. {
  775. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  776. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  777. const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
  778. int i;
  779. struct dev_pm_opp *opp;
  780. unsigned int mvolt = 0;
  781. kgsl_regwrite(device, addr++, a5xx_core->max_power);
  782. kgsl_regwrite(device, addr++, pwr->num_pwrlevels);
  783. /* Write voltage in mV and frequency in MHz */
  784. for (i = 0; i < pwr->num_pwrlevels; i++) {
  785. opp = dev_pm_opp_find_freq_exact(&device->pdev->dev,
  786. pwr->pwrlevels[i].gpu_freq, true);
  787. /* dev_pm_opp_get_voltage() returns uV; convert to mV */
  788. if (!IS_ERR(opp)) {
  789. mvolt = dev_pm_opp_get_voltage(opp) / 1000;
  790. dev_pm_opp_put(opp);
  791. }
  792. kgsl_regwrite(device, addr++, mvolt);
  793. kgsl_regwrite(device, addr++,
  794. pwr->pwrlevels[i].gpu_freq / 1000000);
  795. }
  796. return (pwr->num_pwrlevels * 2 + 2);
  797. }
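/*
 * lm_limit() - Return the limits management power threshold
 * @adreno_dev: The adreno device pointer
 *
 * Use the cached value, the "qcom,lm-limit" DT property, or
 * LM_DEFAULT_LIMIT, in that order.
 */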
  798. static uint32_t lm_limit(struct adreno_device *adreno_dev)
  799. {
  800. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  801. if (adreno_dev->lm_limit)
  802. return adreno_dev->lm_limit;
  803. if (of_property_read_u32(device->pdev->dev.of_node, "qcom,lm-limit",
  804. &adreno_dev->lm_limit))
  805. adreno_dev->lm_limit = LM_DEFAULT_LIMIT;
  806. return adreno_dev->lm_limit;
  807. }
  808. /*
  809. * a530_lm_init() - Initialize LM/DPM on the GPMU for a530
  810. * @adreno_dev: The adreno device pointer
  811. */
  812. static void a530_lm_init(struct adreno_device *adreno_dev)
  813. {
  814. uint32_t length;
  815. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  816. const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
  817. if (!adreno_dev->lm_enabled)
  818. return;
  819. /* If something was wrong with the sequence file, return */
  820. if (adreno_dev->lm_sequence == NULL)
  821. return;
  822. /* Write LM registers including DPM ucode, coefficients, and config */
  823. if (_execute_reg_sequence(adreno_dev, adreno_dev->lm_sequence,
  824. adreno_dev->lm_size)) {
  825. /* If the sequence is invalid, it's not getting better */
  826. adreno_dev->lm_sequence = NULL;
  827. dev_warn(device->dev,
  828. "Invalid LM sequence\n");
  829. return;
  830. }
  831. kgsl_regwrite(device, A5XX_GPMU_TEMP_SENSOR_ID, a5xx_core->gpmu_tsens);
  832. kgsl_regwrite(device, A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x1);
  833. kgsl_regwrite(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x1);
  834. kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE,
  835. (0x80000000 | device->pwrctrl.active_pwrlevel));
  836. /* use the leakage to set this value at runtime */
  837. kgsl_regwrite(device, A5XX_GPMU_BASE_LEAKAGE,
  838. adreno_dev->lm_leakage);
  839. /* Enable the power threshold and set it to 6000m */
  840. kgsl_regwrite(device, A5XX_GPMU_GPMU_PWR_THRESHOLD,
  841. 0x80000000 | lm_limit(adreno_dev));
  842. kgsl_regwrite(device, A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
  843. kgsl_regwrite(device, A5XX_GDPM_CONFIG1, 0x00201FF1);
  844. /* Send an initial message to the GPMU with the LM voltage table */
  845. kgsl_regwrite(device, AGC_MSG_STATE, 1);
  846. kgsl_regwrite(device, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
  847. length = _write_voltage_table(adreno_dev, AGC_MSG_PAYLOAD);
  848. kgsl_regwrite(device, AGC_MSG_PAYLOAD_SIZE, length * sizeof(uint32_t));
  849. kgsl_regwrite(device, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
  850. }
  851. /*
  852. * a530_lm_enable() - Enable the LM/DPM feature on the GPMU for a530
  853. * @adreno_dev: The adreno device pointer
  854. */
  855. static void a530_lm_enable(struct adreno_device *adreno_dev)
  856. {
  857. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  858. if (!adreno_dev->lm_enabled)
  859. return;
  860. /* If no sequence properly initialized, return */
  861. if (adreno_dev->lm_sequence == NULL)
  862. return;
  863. kgsl_regwrite(device, A5XX_GDPM_INT_MASK, 0x00000000);
  864. kgsl_regwrite(device, A5XX_GDPM_INT_EN, 0x0000000A);
  865. kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x00000001);
  866. kgsl_regwrite(device, A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK,
  867. 0x00050000);
  868. kgsl_regwrite(device, A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL,
  869. 0x00030000);
  870. if (adreno_is_a530(adreno_dev))
  871. /* Program throttle control, do not enable idle DCS on v3+ */
  872. kgsl_regwrite(device, A5XX_GPMU_CLOCK_THROTTLE_CTRL,
  873. adreno_is_a530v2(adreno_dev) ? 0x00060011 : 0x00000011);
  874. }
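/*
 * a540_lm_init() - Initialize LM/DPM on the GPMU for a540
 * @adreno_dev: The adreno device pointer
 *
 * Build the AGC LM configuration (throttling, adaptive LM, ISENSE) and
 * send it to the GPMU along with the voltage table and power threshold.
 */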
  875. static void a540_lm_init(struct adreno_device *adreno_dev)
  876. {
  877. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  878. uint32_t agc_lm_config = AGC_BCL_DISABLED |
  879. ((ADRENO_CHIPID_PATCH(adreno_dev->chipid) & 0x3)
  880. << AGC_GPU_VERSION_SHIFT);
  881. unsigned int r;
  882. if (!adreno_dev->throttling_enabled)
  883. agc_lm_config |= AGC_THROTTLE_DISABLE;
  884. if (adreno_dev->lm_enabled) {
  885. agc_lm_config |=
  886. AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE |
  887. AGC_LM_CONFIG_ISENSE_ENABLE;
  888. kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);
  889. if ((r & GPMU_ISENSE_STATUS) == GPMU_ISENSE_END_POINT_CAL_ERR) {
  890. dev_err(device->dev,
  891. "GPMU: ISENSE end point calibration failure\n");
  892. agc_lm_config |= AGC_LM_CONFIG_ENABLE_ERROR;
  893. }
  894. }
  895. kgsl_regwrite(device, AGC_MSG_STATE, 0x80000001);
  896. kgsl_regwrite(device, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
  897. (void) _write_voltage_table(adreno_dev, AGC_MSG_PAYLOAD);
  898. kgsl_regwrite(device, AGC_MSG_PAYLOAD + AGC_LM_CONFIG, agc_lm_config);
  899. kgsl_regwrite(device, AGC_MSG_PAYLOAD + AGC_LEVEL_CONFIG,
  900. (unsigned int) ~(GENMASK(LM_DCVS_LIMIT, 0) |
  901. GENMASK(16+LM_DCVS_LIMIT, 16)));
  902. kgsl_regwrite(device, AGC_MSG_PAYLOAD_SIZE,
  903. (AGC_LEVEL_CONFIG + 1) * sizeof(uint32_t));
  904. kgsl_regwrite(device, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
  905. kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE,
  906. (0x80000000 | device->pwrctrl.active_pwrlevel));
  907. kgsl_regwrite(device, A5XX_GPMU_GPMU_PWR_THRESHOLD,
  908. PWR_THRESHOLD_VALID | lm_limit(adreno_dev));
  909. kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK,
  910. VOLTAGE_INTR_EN);
  911. }
  912. static void a5xx_lm_enable(struct adreno_device *adreno_dev)
  913. {
  914. if (adreno_is_a530(adreno_dev))
  915. a530_lm_enable(adreno_dev);
  916. }
  917. static void a5xx_lm_init(struct adreno_device *adreno_dev)
  918. {
  919. if (adreno_is_a530(adreno_dev))
  920. a530_lm_init(adreno_dev);
  921. else if (adreno_is_a540(adreno_dev))
  922. a540_lm_init(adreno_dev);
  923. }
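/*
 * gpmu_set_level() - Request a power level change from the GPMU
 * @adreno_dev: The adreno device pointer
 * @val: Encoded power level request
 *
 * Write the request to A5XX_GPMU_GPMU_VOLTAGE and poll until the GPMU
 * clears bit 31 or the retries are exhausted.
 */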
  924. static int gpmu_set_level(struct adreno_device *adreno_dev, unsigned int val)
  925. {
  926. unsigned int reg;
  927. int retry = 100;
  928. kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_GPMU_GPMU_VOLTAGE, val);
  929. do {
  930. kgsl_regread(KGSL_DEVICE(adreno_dev), A5XX_GPMU_GPMU_VOLTAGE,
  931. &reg);
  932. } while ((reg & 0x80000000) && retry--);
  933. return (reg & 0x80000000) ? -ETIMEDOUT : 0;
  934. }
  935. /*
  936. * a5xx_pwrlevel_change_settings() - Program the hardware during power level
  937. * transitions
  938. * @adreno_dev: The adreno device pointer
  939. * @prelevel: The previous power level
  940. * @postlevel: The new power level
  941. * @post: True if called after the clock change has taken effect
  942. */
  943. static void a5xx_pwrlevel_change_settings(struct adreno_device *adreno_dev,
  944. unsigned int prelevel, unsigned int postlevel,
  945. bool post)
  946. {
  947. /*
  948. * On pre-a540 HW, only call through if LM is supported and enabled;
  949. * always call through for a540.
  950. */
  951. if (!adreno_is_a540(adreno_dev) && !adreno_dev->lm_enabled)
  952. return;
  953. if (!post) {
  954. if (gpmu_set_level(adreno_dev, (0x80000010 | postlevel)))
  955. dev_err(KGSL_DEVICE(adreno_dev)->dev,
  956. "GPMU pre powerlevel did not stabilize\n");
  957. } else {
  958. if (gpmu_set_level(adreno_dev, (0x80000000 | postlevel)))
  959. dev_err(KGSL_DEVICE(adreno_dev)->dev,
  960. "GPMU post powerlevel did not stabilize\n");
  961. }
  962. }
  963. /* FW driven idle 10% throttle */
  964. #define IDLE_10PCT 0
  965. /* number of cycles when clock is throttled by 50% (CRC) */
  966. #define CRC_50PCT 1
  967. /* number of cycles when clock is throttled by more than 50% (CRC) */
  968. #define CRC_MORE50PCT 2
  969. /* number of cycles when clock is throttled by less than 50% (CRC) */
  970. #define CRC_LESS50PCT 3
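/*
 * a5xx_read_throttling_counters() - Sample the GPMU clock throttling counters
 * @adreno_dev: The adreno device pointer
 *
 * Read the deltas of the throttle perfcounters and return a cycle
 * adjustment weighted by how heavily the clock was throttled. Returns 0
 * if throttling is disabled or the counters were not reserved.
 */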
  971. static int64_t a5xx_read_throttling_counters(struct adreno_device *adreno_dev)
  972. {
  973. int i;
  974. int64_t adj;
  975. uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
  976. struct adreno_busy_data *busy = &adreno_dev->busy_data;
  977. if (!adreno_dev->throttling_enabled)
  978. return 0;
  979. for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
  980. if (!adreno_dev->gpmu_throttle_counters[i])
  981. return 0;
  982. th[i] = counter_delta(KGSL_DEVICE(adreno_dev),
  983. adreno_dev->gpmu_throttle_counters[i],
  984. &busy->throttle_cycles[i]);
  985. }
  986. adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
  987. adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
  988. trace_kgsl_clock_throttling(
  989. th[IDLE_10PCT], th[CRC_50PCT],
  990. th[CRC_MORE50PCT], th[CRC_LESS50PCT],
  991. adj);
  992. return adj;
  993. }
  994. /*
  995. * a5xx_gpmu_reset() - Re-enable GPMU based power features and restart GPMU
  996. * @work: Pointer to the work struct for gpmu reset
  997. *
  998. * Load the GPMU microcode, set up any features such as hardware clock gating
  999. * or IFPC, and take the GPMU out of reset.
  1000. */
  1001. static void a5xx_gpmu_reset(struct work_struct *work)
  1002. {
  1003. struct adreno_device *adreno_dev = container_of(work,
  1004. struct adreno_device, gpmu_work);
  1005. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1006. if (test_bit(ADRENO_DEVICE_GPMU_INITIALIZED, &adreno_dev->priv))
  1007. return;
  1008. /*
  1009. * If the GPMU has already been restarted, or a restart is in progress
  1010. * after the watchdog timeout, then there is no need to reset the GPMU
  1011. * again.
  1012. */
  1013. if (device->state != KGSL_STATE_AWARE && device->state != KGSL_STATE_ACTIVE)
  1014. return;
  1015. mutex_lock(&device->mutex);
  1016. if (a5xx_regulator_enable(adreno_dev))
  1017. goto out;
  1018. /* Soft reset of the GPMU block */
  1019. kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD, BIT(16));
  1020. /* GPU comes up in secured mode, make it unsecured by default */
  1021. if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
  1022. kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
  1023. a5xx_gpmu_init(adreno_dev);
  1024. out:
  1025. mutex_unlock(&device->mutex);
  1026. }
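/*
 * _setup_throttling_counters() - Reserve the GPMU throttle perfcounters
 * @adreno_dev: The adreno device pointer
 *
 * On a540, reserve the GPMU_PWR countables (starting at offset 43) used
 * for clock throttling statistics and reset the cached cycle counts.
 */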
  1027. static void _setup_throttling_counters(struct adreno_device *adreno_dev)
  1028. {
  1029. int i, ret = 0;
  1030. if (!adreno_is_a540(adreno_dev))
  1031. return;
  1032. for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
  1033. /* reset throttled cycles value */
  1034. adreno_dev->busy_data.throttle_cycles[i] = 0;
  1035. /* Throttle countables start at offset 43 */
  1036. ret |= adreno_perfcounter_kernel_get(adreno_dev,
  1037. KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 43 + i,
  1038. &adreno_dev->gpmu_throttle_counters[i], NULL);
  1039. }
  1040. WARN_ONCE(ret, "Unable to get one or more clock throttling registers\n");
  1041. }
  1042. /*
  1043. * a5xx_start() - Device start
  1044. * @adreno_dev: Pointer to adreno device
  1045. *
  1046. * a5xx device start
  1047. */
  1048. static int a5xx_start(struct adreno_device *adreno_dev)
  1049. {
  1050. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1051. const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
  1052. unsigned int bit;
  1053. int ret;
  1054. ret = kgsl_mmu_start(device);
  1055. if (ret)
  1056. return ret;
  1057. adreno_get_bus_counters(adreno_dev);
  1058. adreno_perfcounter_restore(adreno_dev);
  1059. if (adreno_is_a530(adreno_dev) &&
  1060. ADRENO_FEATURE(adreno_dev, ADRENO_LM))
  1061. adreno_perfcounter_kernel_get(adreno_dev,
  1062. KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 27,
  1063. &adreno_dev->lm_threshold_count, NULL);
  1064. /* Enable 64 bit addressing */
  1065. kgsl_regwrite(device, A5XX_CP_ADDR_MODE_CNTL, 0x1);
  1066. kgsl_regwrite(device, A5XX_VSC_ADDR_MODE_CNTL, 0x1);
  1067. kgsl_regwrite(device, A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
  1068. kgsl_regwrite(device, A5XX_RB_ADDR_MODE_CNTL, 0x1);
  1069. kgsl_regwrite(device, A5XX_PC_ADDR_MODE_CNTL, 0x1);
  1070. kgsl_regwrite(device, A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
  1071. kgsl_regwrite(device, A5XX_VFD_ADDR_MODE_CNTL, 0x1);
  1072. kgsl_regwrite(device, A5XX_VPC_ADDR_MODE_CNTL, 0x1);
  1073. kgsl_regwrite(device, A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
  1074. kgsl_regwrite(device, A5XX_SP_ADDR_MODE_CNTL, 0x1);
  1075. kgsl_regwrite(device, A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
  1076. kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
  1077. _setup_throttling_counters(adreno_dev);
  1078. /* Set up VBIF registers from the GPU core definition */
  1079. kgsl_regmap_multi_write(&device->regmap, a5xx_core->vbif,
  1080. a5xx_core->vbif_count);
  1081. /* Make all blocks contribute to the GPU BUSY perf counter */
  1082. kgsl_regwrite(device, A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
  1083. /* Program RBBM counter 0 to report GPU busy for frequency scaling */
  1084. kgsl_regwrite(device, A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
  1085. /*
  1086. * Enable the RBBM error reporting bits. This lets us get
  1087. * useful information on failure
  1088. */
  1089. kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL0, 0x00000001);
  1090. if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_FAULT_DETECT_MASK)) {
  1091. /*
  1092. * We have 4 RB units, and only RB0 activity signals are
  1093. * working correctly. Mask out RB1-3 activity signals
  1094. * from the HW hang detection logic as per
  1095. * recommendation of hardware team.
  1096. */
  1097. kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
  1098. 0xF0000000);
  1099. kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
  1100. 0xFFFFFFFF);
  1101. kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
  1102. 0xFFFFFFFF);
  1103. kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
  1104. 0xFFFFFFFF);
  1105. kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
  1106. 0xFFFFFFFF);
  1107. kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
  1108. 0xFFFFFFFF);
  1109. kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
  1110. 0xFFFFFFFF);
  1111. kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
  1112. 0xFFFFFFFF);
  1113. }
  1114. /*
  1115. * Set hang detection threshold to 4 million cycles
  1116. * (0x3FFFF*16)
  1117. */
  1118. kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
  1119. (1 << 30) | 0x3FFFF);
  1120. /* Turn on performance counters */
  1121. kgsl_regwrite(device, A5XX_RBBM_PERFCTR_CNTL, 0x01);
  1122. /*
  1123. * This is to increase performance by restricting VFD's cache access,
  1124. * so that LRZ and other data get evicted less.
  1125. */
  1126. kgsl_regwrite(device, A5XX_UCHE_CACHE_WAYS, 0x02);
  1127. /*
  1128. * Set UCHE_WRITE_THRU_BASE to the UCHE_TRAP_BASE, effectively
  1129. * disabling L2 bypass
  1130. */
  1131. kgsl_regwrite(device, A5XX_UCHE_TRAP_BASE_LO, 0xffff0000);
  1132. kgsl_regwrite(device, A5XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
  1133. kgsl_regwrite(device, A5XX_UCHE_WRITE_THRU_BASE_LO, 0xffff0000);
  1134. kgsl_regwrite(device, A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
  1135. /* Program the GMEM VA range for the UCHE path */
  1136. kgsl_regwrite(device, A5XX_UCHE_GMEM_RANGE_MIN_LO,
  1137. adreno_dev->uche_gmem_base);
  1138. kgsl_regwrite(device, A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x0);
  1139. kgsl_regwrite(device, A5XX_UCHE_GMEM_RANGE_MAX_LO,
  1140. adreno_dev->uche_gmem_base +
  1141. adreno_dev->gpucore->gmem_size - 1);
  1142. kgsl_regwrite(device, A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x0);
  1143. /*
  1144. * The CP registers below default to 0x0; program init
  1145. * values based on the a5xx flavor.
  1146. */
  1147. if (adreno_is_a505_or_a506(adreno_dev) || adreno_is_a508(adreno_dev)) {
  1148. kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x20);
  1149. kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x400);
  1150. kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
  1151. kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
  1152. } else if (adreno_is_a510(adreno_dev)) {
  1153. kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x20);
  1154. kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x20);
  1155. kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
  1156. kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
  1157. } else if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev)) {
  1158. kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x40);
  1159. kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x400);
  1160. kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
  1161. kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
  1162. } else {
  1163. kgsl_regwrite(device, A5XX_CP_MEQ_THRESHOLDS, 0x40);
  1164. kgsl_regwrite(device, A5XX_CP_MERCIU_SIZE, 0x40);
  1165. kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
  1166. kgsl_regwrite(device, A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
  1167. }
  1168. /*
  1169. * vtxFifo and primFifo threshold default values
  1170. * differ between a5xx targets.
  1171. */
  1172. if (adreno_is_a505_or_a506(adreno_dev) || adreno_is_a508(adreno_dev))
  1173. kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL,
  1174. (0x100 << 11 | 0x100 << 22));
  1175. else if (adreno_is_a510(adreno_dev) || adreno_is_a512(adreno_dev))
  1176. kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL,
  1177. (0x200 << 11 | 0x200 << 22));
  1178. else
  1179. kgsl_regwrite(device, A5XX_PC_DBG_ECO_CNTL,
  1180. (0x400 << 11 | 0x300 << 22));
  1181. if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI)) {
  1182. /*
  1183. * Set TWOPASSUSEWFI in A5XX_PC_DBG_ECO_CNTL for
  1184. * microcodes after v77
  1185. */
  1186. if ((adreno_compare_pfp_version(adreno_dev, 0x5FF077) >= 0))
  1187. kgsl_regrmw(device, A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
  1188. }
  1189. if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING)) {
  1190. /*
  1191. * Disable RB sampler datapath DP2 clock gating
  1192. * optimization for 1-SP GPU's, by default it is enabled.
  1193. */
  1194. kgsl_regrmw(device, A5XX_RB_DBG_ECO_CNT, 0, (1 << 9));
  1195. }
  1196. /*
  1197. * Disable UCHE global filter as SP can invalidate/flush
  1198. * independently
  1199. */
  1200. kgsl_regwrite(device, A5XX_UCHE_MODE_CNTL, BIT(29));
  1201. /* Set the USE_RETENTION_FLOPS chicken bit */
  1202. kgsl_regwrite(device, A5XX_CP_CHICKEN_DBG, 0x02000000);
  1203. /* Enable ISDB mode if requested */
  1204. if (test_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv)) {
  1205. if (!adreno_active_count_get(adreno_dev)) {
  1206. /*
  1207. * Disable ME/PFP split timeouts when the debugger is
  1208. * enabled because the CP doesn't know when a shader is
  1209. * in active debug
  1210. */
  1211. kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL1, 0x06FFFFFF);
  1212. /* Force the SP0/SP1 clocks on to enable ISDB */
  1213. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL_SP0, 0x0);
  1214. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL_SP1, 0x0);
  1215. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL_SP2, 0x0);
  1216. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL_SP3, 0x0);
  1217. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL2_SP0, 0x0);
  1218. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL2_SP1, 0x0);
  1219. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL2_SP2, 0x0);
  1220. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL2_SP3, 0x0);
  1221. /* disable HWCG */
  1222. kgsl_regwrite(device, A5XX_RBBM_CLOCK_CNTL, 0x0);
  1223. kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, 0x0);
  1224. } else
  1225. dev_err(device->dev,
  1226. "Active count failed while turning on ISDB\n");
  1227. } else {
  1228. /* if not in ISDB mode enable ME/PFP split notification */
  1229. kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
  1230. }
  1231. kgsl_regwrite(device, A5XX_RBBM_AHB_CNTL2, 0x0000003F);
  1232. bit = adreno_dev->highest_bank_bit ?
  1233. (adreno_dev->highest_bank_bit - 13) & 0x03 : 0;
  1234. /*
  1235. * Program the highest DDR bank bit that was passed in
  1236. * from the DT in a handful of registers. Some of these
  1237. * registers will also be written by the UMD, but we
  1238. * want to program them in case we happen to use the
  1239. * UCHE before the UMD does
  1240. */
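	/*
	 * Example: a highest_bank_bit of 15 from the DT programs
	 * bit = (15 - 13) & 0x3 = 2; a value of 0 leaves bit at 0.
	 */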
	kgsl_regwrite(device, A5XX_TPL1_MODE_CNTL, bit << 7);
	kgsl_regwrite(device, A5XX_RB_MODE_CNTL, bit << 1);
	if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev))
		kgsl_regwrite(device, A5XX_UCHE_DBG_ECO_CNTL_2, bit);

	/* Disable all flat shading optimization */
	kgsl_regrmw(device, A5XX_VPC_DBG_ECO_CNTL, 0, 0x1 << 10);

	/*
	 * A VPC corner case with local memory load/kill leads to corrupt
	 * internal state. The normal disable does not work for all a5xx
	 * chips, so use the following settings to disable it.
	 */
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_DISABLE_LMLOADKILL)) {
		kgsl_regrmw(device, A5XX_VPC_DBG_ECO_CNTL, 0, 0x1 << 23);
		kgsl_regrmw(device, A5XX_HLSQ_DBG_ECO_CNTL, 0x1 << 18, 0);
	}

	if (device->mmu.secured) {
		kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_CNTL, 0x0);
		kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
			lower_32_bits(KGSL_IOMMU_SECURE_BASE32));
		kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
			upper_32_bits(KGSL_IOMMU_SECURE_BASE32));
		kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE,
			FIELD_PREP(GENMASK(31, 12),
			(KGSL_IOMMU_SECURE_SIZE(&device->mmu) / SZ_4K)));
	}

	a5xx_preemption_start(adreno_dev);
	a5xx_protect_init(adreno_dev);

	return 0;
}

/*
 * Follow the ME_INIT sequence with a preemption yield to allow the GPU to move
 * to a different ringbuffer, if desired
 */
static int _preemption_init(
			struct adreno_device *adreno_dev,
			struct adreno_ringbuffer *rb, unsigned int *cmds,
			struct kgsl_context *context)
{
	unsigned int *cmds_orig = cmds;
	uint64_t gpuaddr = rb->preemption_desc->gpuaddr;

	/* Turn CP protection OFF */
	cmds += cp_protected_mode(adreno_dev, cmds, 0);

	/*
	 * CP during context switch will save context switch info to
	 * a5xx_cp_preemption_record pointed by CONTEXT_SWITCH_SAVE_ADDR
	 */
	*cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 1);
	*cmds++ = lower_32_bits(gpuaddr);
	*cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI, 1);
	*cmds++ = upper_32_bits(gpuaddr);

	/* Turn CP protection ON */
	cmds += cp_protected_mode(adreno_dev, cmds, 1);

	*cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_GLOBAL, 1);
	*cmds++ = 0;

	*cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
	*cmds++ = 1;

	/* Enable yield in RB only */
	*cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
	*cmds++ = 1;

	*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
	cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
	*cmds++ = 0;
	/* generate interrupt on preemption completion */
	*cmds++ = 1;

	return cmds - cmds_orig;
}

static int a5xx_post_start(struct adreno_device *adreno_dev)
{
	int ret;
	unsigned int *cmds, *start;
	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;

	if (!adreno_is_a530(adreno_dev) &&
		!adreno_is_preemption_enabled(adreno_dev))
		return 0;

	cmds = adreno_ringbuffer_allocspace(rb, 42);
	if (IS_ERR(cmds)) {
		struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

		dev_err(device->dev,
			"error allocating preemption init cmds\n");
		return PTR_ERR(cmds);
	}
	start = cmds;

	/*
	 * Send a pipeline stat event whenever the GPU gets powered up
	 * to cause misbehaving perf counters to start ticking
	 */
	if (adreno_is_a530(adreno_dev)) {
		*cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
		*cmds++ = 0xF;
	}
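	/*
	 * Trim the write pointer back by however many of the 42 reserved
	 * dwords were not actually written before submitting.
	 */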
	if (adreno_is_preemption_enabled(adreno_dev)) {
		cmds += _preemption_init(adreno_dev, rb, cmds, NULL);
		rb->_wptr = rb->_wptr - (42 - (cmds - start));
		ret = a5xx_ringbuffer_submit(rb, NULL, false);
	} else {
		rb->_wptr = rb->_wptr - (42 - (cmds - start));
		ret = a5xx_ringbuffer_submit(rb, NULL, true);
	}

	if (!ret) {
		ret = adreno_spin_idle(adreno_dev, 2000);
		if (ret)
			a5xx_spin_idle_debug(adreno_dev,
				"hw initialization failed to idle\n");
	}

	return ret;
}

static int a5xx_gpmu_init(struct adreno_device *adreno_dev)
{
	int ret;

	/* Set up LM before initializing the GPMU */
	a5xx_lm_init(adreno_dev);

	/* Enable SPTP based power collapse before enabling GPMU */
	a5xx_enable_pc(adreno_dev);

	ret = a5xx_gpmu_start(adreno_dev);
	if (ret)
		return ret;

	/* Enable limits management */
	a5xx_lm_enable(adreno_dev);
	return 0;
}

static int a5xx_zap_shader_resume(struct kgsl_device *device)
{
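	/*
	 * Presumably 0 is the "zap shader resume" state and 13 the GPU
	 * peripheral (PAS) id expected by the SCM interface; these match the
	 * values used by other msm GPU drivers for the same call.
	 */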
	int ret = qcom_scm_set_remote_state(0, 13);

	if (ret)
		dev_err(device->dev,
			"SCM zap resume call failed: %d\n", ret);

	return ret;
}

/*
 * a5xx_microcode_load() - Load microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a5xx_microcode_load(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
	struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
	const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);
	uint64_t gpuaddr;

	gpuaddr = pm4_fw->memdesc->gpuaddr;
	kgsl_regwrite(device, A5XX_CP_PM4_INSTR_BASE_LO,
		lower_32_bits(gpuaddr));
	kgsl_regwrite(device, A5XX_CP_PM4_INSTR_BASE_HI,
		upper_32_bits(gpuaddr));

	gpuaddr = pfp_fw->memdesc->gpuaddr;
	kgsl_regwrite(device, A5XX_CP_PFP_INSTR_BASE_LO,
		lower_32_bits(gpuaddr));
	kgsl_regwrite(device, A5XX_CP_PFP_INSTR_BASE_HI,
		upper_32_bits(gpuaddr));

	/*
	 * Do not attempt to load the zap shader if the MMU does
	 * not support secure mode.
	 */
	if (!device->mmu.secured)
		return 0;
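	/*
	 * If the zap shader is already loaded and this core does not retain
	 * CPZ state across power collapse, a lightweight SCM resume call is
	 * enough; otherwise go through the normal zap shader load path.
	 */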
	if (adreno_dev->zap_loaded && !(ADRENO_FEATURE(adreno_dev,
		ADRENO_CPZ_RETENTION)))
		return a5xx_zap_shader_resume(device);

	return adreno_zap_shader_load(adreno_dev, a5xx_core->zap_name);
}

static int _me_init_ucode_workarounds(struct adreno_device *adreno_dev)
{
	switch (ADRENO_GPUREV(adreno_dev)) {
	case ADRENO_REV_A510:
		return 0x00000001; /* Ucode workaround for token end syncs */
	case ADRENO_REV_A505:
	case ADRENO_REV_A506:
	case ADRENO_REV_A530:
		/*
		 * Ucode workarounds for token end syncs,
		 * WFI after every direct-render 3D mode draw and
		 * WFI after every 2D Mode 3 draw.
		 */
		return 0x0000000B;
	default:
		return 0x00000000; /* No ucode workarounds enabled */
	}
}

/*
 * The CP_INIT_MAX_CONTEXT bit tells if multiple hardware contexts can
 * be used at once or if they should be serialized
 */
#define CP_INIT_MAX_CONTEXT BIT(0)

/* Enables register protection mode */
#define CP_INIT_ERROR_DETECTION_CONTROL BIT(1)

/* Header dump information */
#define CP_INIT_HEADER_DUMP BIT(2) /* Reserved */

/* Default Reset states enabled for PFP and ME */
#define CP_INIT_DEFAULT_RESET_STATE BIT(3)

/* Drawcall filter range */
#define CP_INIT_DRAWCALL_FILTER_RANGE BIT(4)

/* Ucode workaround masks */
#define CP_INIT_UCODE_WORKAROUND_MASK BIT(5)

#define CP_INIT_MASK (CP_INIT_MAX_CONTEXT | \
		CP_INIT_ERROR_DETECTION_CONTROL | \
		CP_INIT_HEADER_DUMP | \
		CP_INIT_DEFAULT_RESET_STATE | \
		CP_INIT_UCODE_WORKAROUND_MASK)
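/*
 * Note that CP_INIT_DRAWCALL_FILTER_RANGE is not part of CP_INIT_MASK, so the
 * corresponding start/end range ordinals are compiled out of
 * a5xx_send_me_init() below.
 */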
static int a5xx_critical_packet_submit(struct adreno_device *adreno_dev,
					struct adreno_ringbuffer *rb)
{
	unsigned int *cmds;
	int ret;

	if (!critical_packet_constructed)
		return 0;

	cmds = adreno_ringbuffer_allocspace(rb, 4);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	*cmds++ = cp_mem_packet(adreno_dev, CP_INDIRECT_BUFFER_PFE, 2, 1);
	cmds += cp_gpuaddr(adreno_dev, cmds, adreno_dev->critpkts->gpuaddr);
	*cmds++ = crit_pkts_dwords;

	ret = a5xx_ringbuffer_submit(rb, NULL, true);
	if (!ret) {
		ret = adreno_spin_idle(adreno_dev, 20);
		if (ret)
			a5xx_spin_idle_debug(adreno_dev,
				"Critical packet submission failed to idle\n");
	}

	return ret;
}

/*
 * a5xx_send_me_init() - Initialize ringbuffer
 * @adreno_dev: Pointer to adreno device
 * @rb: Pointer to the ringbuffer of device
 *
 * Submit commands for ME initialization.
 */
static int a5xx_send_me_init(struct adreno_device *adreno_dev,
			struct adreno_ringbuffer *rb)
{
	unsigned int *cmds;
	int i = 0, ret;

	cmds = adreno_ringbuffer_allocspace(rb, 9);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	cmds[i++] = cp_type7_packet(CP_ME_INIT, 8);

	/* Enabled ordinal mask */
	cmds[i++] = CP_INIT_MASK;

	if (CP_INIT_MASK & CP_INIT_MAX_CONTEXT)
		cmds[i++] = 0x00000003;

	if (CP_INIT_MASK & CP_INIT_ERROR_DETECTION_CONTROL)
		cmds[i++] = 0x20000000;

	if (CP_INIT_MASK & CP_INIT_HEADER_DUMP) {
		/* Header dump address */
		cmds[i++] = 0x00000000;
		/* Header dump enable and dump size */
		cmds[i++] = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_DRAWCALL_FILTER_RANGE) {
		/* Start range */
		cmds[i++] = 0x00000000;
		/* End range (inclusive) */
		cmds[i++] = 0x00000000;
	}

	if (CP_INIT_MASK & CP_INIT_UCODE_WORKAROUND_MASK)
		cmds[i++] = _me_init_ucode_workarounds(adreno_dev);

	ret = a5xx_ringbuffer_submit(rb, NULL, true);
	if (!ret) {
		ret = adreno_spin_idle(adreno_dev, 2000);
		if (ret)
			a5xx_spin_idle_debug(adreno_dev,
				"CP initialization failed to idle\n");
	}

	return ret;
}

/*
 * a5xx_rb_start() - Start the ringbuffer
 * @adreno_dev: Pointer to adreno device
 */
static int a5xx_rb_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb;
	uint64_t addr;
	unsigned int *cmds;
	int ret, i;

	/* Clear all the ringbuffers */
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		memset(rb->buffer_desc->hostptr, 0xaa, KGSL_RB_SIZE);
		kgsl_sharedmem_writel(device->scratch,
			SCRATCH_RB_OFFSET(rb->id, rptr), 0);

		rb->wptr = 0;
		rb->_wptr = 0;
		rb->wptr_preempt_end = ~0;
	}

	/* Set up the current ringbuffer */
	rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);

	addr = SCRATCH_RB_GPU_ADDR(device, rb->id, rptr);
	kgsl_regwrite(device, A5XX_CP_RB_RPTR_ADDR_LO, lower_32_bits(addr));
	kgsl_regwrite(device, A5XX_CP_RB_RPTR_ADDR_HI, upper_32_bits(addr));

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2).
	 * Also disable the host RPTR shadow register as it might be unreliable
	 * in certain circumstances.
	 */
	kgsl_regwrite(device, A5XX_CP_RB_CNTL,
		A5XX_CP_RB_CNTL_DEFAULT);

	kgsl_regwrite(device, A5XX_CP_RB_BASE,
		lower_32_bits(rb->buffer_desc->gpuaddr));
	kgsl_regwrite(device, A5XX_CP_RB_BASE_HI,
		upper_32_bits(rb->buffer_desc->gpuaddr));

	ret = a5xx_microcode_load(adreno_dev);
	if (ret)
		return ret;

	/* clear ME_HALT to start micro engine */
	kgsl_regwrite(device, A5XX_CP_ME_CNTL, 0);

	ret = a5xx_send_me_init(adreno_dev, rb);
	if (ret)
		return ret;

	/* Run the critical packets if we need to */
	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS)) {
		ret = a5xx_critical_packet_submit(adreno_dev, rb);
		if (ret)
			return ret;
	}

	/*
	 * Try to execute the zap shader if it exists, otherwise just try
	 * directly writing to the control register
	 */
	if (!adreno_dev->zap_loaded)
		kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0);
	else {
		cmds = adreno_ringbuffer_allocspace(rb, 2);
		if (IS_ERR(cmds))
			return PTR_ERR(cmds);

		*cmds++ = cp_packet(adreno_dev, CP_SET_SECURE_MODE, 1);
		*cmds++ = 0;
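		/*
		 * A payload of 0 drops the GPU out of secure mode (hence the
		 * "Switch to unsecure" message below).
		 */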
		ret = a5xx_ringbuffer_submit(rb, NULL, true);
		if (!ret) {
			ret = adreno_spin_idle(adreno_dev, 2000);
			if (ret) {
				a5xx_spin_idle_debug(adreno_dev,
					"Switch to unsecure failed to idle\n");
				return ret;
			}
		}
	}

	ret = a5xx_gpmu_init(adreno_dev);
	if (ret)
		return ret;

	a5xx_post_start(adreno_dev);

	return 0;
}

/*
 * a5xx_microcode_read() - Read microcode
 * @adreno_dev: Pointer to adreno device
 */
static int a5xx_microcode_read(struct adreno_device *adreno_dev)
{
	int ret;
	struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
	struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
	const struct adreno_a5xx_core *a5xx_core = to_a5xx_core(adreno_dev);

	ret = adreno_get_firmware(adreno_dev, a5xx_core->pm4fw_name, pm4_fw);
	if (ret)
		return ret;

	ret = adreno_get_firmware(adreno_dev, a5xx_core->pfpfw_name, pfp_fw);
	if (ret)
		return ret;

	ret = _load_gpmu_firmware(adreno_dev);
	if (ret)
		return ret;

	_load_regfile(adreno_dev);

	return ret;
}

/* Register offset defines for A5XX, in order of enum adreno_regs */
static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A5XX_CP_RB_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A5XX_CP_RB_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
			A5XX_CP_RB_RPTR_ADDR_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
			A5XX_CP_RB_RPTR_ADDR_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A5XX_CP_RB_RPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A5XX_CP_RB_WPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A5XX_CP_ME_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A5XX_CP_RB_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A5XX_CP_IB1_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, A5XX_CP_IB1_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A5XX_CP_IB1_BUFSZ),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A5XX_CP_IB2_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, A5XX_CP_IB2_BASE_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A5XX_CP_IB2_BUFSZ),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PROTECT_REG_0, A5XX_CP_PROTECT_REG_0),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT, A5XX_CP_CONTEXT_SWITCH_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_DEBUG, ADRENO_REG_SKIP),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_DISABLE, ADRENO_REG_SKIP),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
			A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
			A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A5XX_RBBM_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A5XX_RBBM_STATUS3),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A5XX_RBBM_INT_0_MASK),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A5XX_RBBM_CLOCK_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A5XX_RBBM_SW_RESET_CMD),
	ADRENO_REG_DEFINE(ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
			A5XX_GPMU_POWER_COUNTER_ENABLE),
};

static void a5xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status1, status2;

	kgsl_regread(device, A5XX_CP_INTERRUPT_STATUS, &status1);

	if (status1 & BIT(A5XX_CP_OPCODE_ERROR)) {
		unsigned int val;

		kgsl_regwrite(device, A5XX_CP_PFP_STAT_ADDR, 0);

		/*
		 * A5XX_CP_PFP_STAT_DATA is indexed, so read it twice to get
		 * the value we want
		 */
		kgsl_regread(device, A5XX_CP_PFP_STAT_DATA, &val);
		kgsl_regread(device, A5XX_CP_PFP_STAT_DATA, &val);

		dev_crit_ratelimited(device->dev,
			"ringbuffer opcode error | possible opcode=0x%8.8X\n",
			val);
	}

	if (status1 & BIT(A5XX_CP_RESERVED_BIT_ERROR))
		dev_crit_ratelimited(device->dev,
			"ringbuffer reserved bit error interrupt\n");

	if (status1 & BIT(A5XX_CP_HW_FAULT_ERROR)) {
		kgsl_regread(device, A5XX_CP_HW_FAULT, &status2);
		dev_crit_ratelimited(device->dev,
			"CP | Ringbuffer HW fault | status=%x\n",
			status2);
	}

	if (status1 & BIT(A5XX_CP_DMA_ERROR))
		dev_crit_ratelimited(device->dev, "CP | DMA error\n");

	if (status1 & BIT(A5XX_CP_REGISTER_PROTECTION_ERROR)) {
		kgsl_regread(device, A5XX_CP_PROTECT_STATUS, &status2);
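		/*
		 * Bit 24 distinguishes a write from a read access and bits
		 * [19:0] hold the byte offset, reported below as a
		 * word-aligned register address.
		 */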
		dev_crit_ratelimited(device->dev,
			"CP | Protected mode error | %s | addr=%x | status=%x\n",
			status2 & (1 << 24) ? "WRITE" : "READ",
			(status2 & 0xFFFFF) >> 2, status2);
	}

	if (status1 & BIT(A5XX_CP_AHB_ERROR)) {
		kgsl_regread(device, A5XX_CP_AHB_FAULT, &status2);
		dev_crit_ratelimited(device->dev,
			"ringbuffer AHB error interrupt | status=%x\n",
			status2);
	}
}

static void a5xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int reg;

	switch (bit) {
	case A5XX_INT_RBBM_AHB_ERROR: {
		kgsl_regread(device, A5XX_RBBM_AHB_ERROR_STATUS, &reg);

		/*
		 * Return the word address of the erroring register so that it
		 * matches the register specification
		 */
		dev_crit_ratelimited(device->dev,
			"RBBM | AHB bus error | %s | addr=%x | ports=%x:%x\n",
			reg & (1 << 28) ? "WRITE" : "READ",
			(reg & 0xFFFFF) >> 2,
			(reg >> 20) & 0x3,
			(reg >> 24) & 0xF);

		/* Clear the error */
		kgsl_regwrite(device, A5XX_RBBM_AHB_CMD, (1 << 4));
		break;
	}
	case A5XX_INT_RBBM_TRANSFER_TIMEOUT:
		dev_crit_ratelimited(device->dev,
			"RBBM: AHB transfer timeout\n");
		break;
	case A5XX_INT_RBBM_ME_MS_TIMEOUT:
		kgsl_regread(device, A5XX_RBBM_AHB_ME_SPLIT_STATUS, &reg);
		dev_crit_ratelimited(device->dev,
			"RBBM | ME master split timeout | status=%x\n",
			reg);
		break;
	case A5XX_INT_RBBM_PFP_MS_TIMEOUT:
		kgsl_regread(device, A5XX_RBBM_AHB_PFP_SPLIT_STATUS, &reg);
		dev_crit_ratelimited(device->dev,
			"RBBM | PFP master split timeout | status=%x\n",
			reg);
		break;
	case A5XX_INT_RBBM_ETS_MS_TIMEOUT:
		dev_crit_ratelimited(device->dev,
			"RBBM: ETS master split timeout\n");
		break;
	case A5XX_INT_RBBM_ATB_ASYNC_OVERFLOW:
		dev_crit_ratelimited(device->dev,
			"RBBM: ATB ASYNC overflow\n");
		break;
	case A5XX_INT_RBBM_ATB_BUS_OVERFLOW:
		dev_crit_ratelimited(device->dev,
			"RBBM: ATB bus overflow\n");
		break;
	case A5XX_INT_UCHE_OOB_ACCESS:
		dev_crit_ratelimited(device->dev,
			"UCHE: Out of bounds access\n");
		break;
	case A5XX_INT_UCHE_TRAP_INTR:
		dev_crit_ratelimited(device->dev, "UCHE: Trap interrupt\n");
		break;
	case A5XX_INT_GPMU_VOLTAGE_DROOP:
		dev_crit_ratelimited(device->dev, "GPMU: Voltage droop\n");
		break;
	default:
		dev_crit_ratelimited(device->dev, "Unknown interrupt %d\n",
			bit);
	}
}
static void a5xx_irq_storm_worker(struct work_struct *work)
{
	struct adreno_device *adreno_dev = container_of(work,
			struct adreno_device, irq_storm_work);
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int status;

	mutex_lock(&device->mutex);

	/* Wait for the storm to clear up */
	do {
		kgsl_regwrite(device, A5XX_RBBM_INT_CLEAR_CMD,
			BIT(A5XX_INT_CP_CACHE_FLUSH_TS));
		kgsl_regread(device, A5XX_RBBM_INT_0_STATUS, &status);
	} while (status & BIT(A5XX_INT_CP_CACHE_FLUSH_TS));

	/* Re-enable the interrupt bit in the mask */
	adreno_dev->irq_mask |= BIT(A5XX_INT_CP_CACHE_FLUSH_TS);
	kgsl_regwrite(device, A5XX_RBBM_INT_0_MASK, adreno_dev->irq_mask);
	clear_bit(ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED, &adreno_dev->priv);

	dev_warn(device->dev, "Re-enabled A5XX_INT_CP_CACHE_FLUSH_TS\n");
	mutex_unlock(&device->mutex);

	/* Reschedule just to make sure everything retires */
	adreno_dispatcher_schedule(device);
}

static void a5xx_cp_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = &adreno_dev->dev;
	unsigned int cur;
	static unsigned int count;
	static unsigned int prev;

	if (test_bit(ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED, &adreno_dev->priv))
		return;

	kgsl_sharedmem_readl(device->memstore, &cur,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
			ref_wait_ts));

	/*
	 * prev holds a previously read value
	 * from memory. It should be changed by the GPU with every
	 * interrupt. If the value we know about and the value we just
	 * read are the same, then we are likely in a storm.
	 * If this happens twice, disable the interrupt in the mask
	 * so the dispatcher can take care of the issue. It is then
	 * up to the dispatcher to re-enable the mask once all work
	 * is done and the storm has ended.
	 */
	if (prev == cur) {
		count++;
		if (count == 2) {
			/* disable interrupt from the mask */
			set_bit(ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED,
				&adreno_dev->priv);
			adreno_dev->irq_mask &=
				~BIT(A5XX_INT_CP_CACHE_FLUSH_TS);
			kgsl_regwrite(device, A5XX_RBBM_INT_0_MASK,
				adreno_dev->irq_mask);

			kgsl_schedule_work(&adreno_dev->irq_storm_work);

			return;
		}
	} else {
		count = 0;
		prev = cur;
	}

	a5xx_preemption_trigger(adreno_dev);
	adreno_dispatcher_schedule(device);
}
static const char *gpmu_int_msg[32] = {
	[FW_INTR_INFO] = "FW_INTR_INFO",
	[LLM_ACK_ERR_INTR] = "LLM_ACK_ERR_INTR",
	[ISENS_TRIM_ERR_INTR] = "ISENS_TRIM_ERR_INTR",
	[ISENS_ERR_INTR] = "ISENS_ERR_INTR",
	[ISENS_IDLE_ERR_INTR] = "ISENS_IDLE_ERR_INTR",
	[ISENS_PWR_ON_ERR_INTR] = "ISENS_PWR_ON_ERR_INTR",
	[6 ... 30] = "",
	[WDOG_EXPITED] = "WDOG_EXPITED"};

static void a5xx_gpmu_int_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int reg, i;

	kgsl_regread(device, A5XX_GPMU_RBBM_INTR_INFO, &reg);

	if (reg & (~VALID_GPMU_IRQ)) {
		dev_crit_ratelimited(device->dev,
			"GPMU: Unknown IRQ mask 0x%08lx in 0x%08x\n",
			reg & (~VALID_GPMU_IRQ), reg);
	}

	for (i = 0; i < 32; i++)
		switch (reg & BIT(i)) {
		case BIT(WDOG_EXPITED):
			if (test_and_clear_bit(ADRENO_DEVICE_GPMU_INITIALIZED,
				&adreno_dev->priv)) {
				/* Stop GPMU */
				kgsl_regwrite(device,
					A5XX_GPMU_CM3_SYSRESET, 1);
				kgsl_schedule_work(&adreno_dev->gpmu_work);
			}
			fallthrough;
		case BIT(FW_INTR_INFO):
			fallthrough;
		case BIT(LLM_ACK_ERR_INTR):
			fallthrough;
		case BIT(ISENS_TRIM_ERR_INTR):
			fallthrough;
		case BIT(ISENS_ERR_INTR):
			fallthrough;
		case BIT(ISENS_IDLE_ERR_INTR):
			fallthrough;
		case BIT(ISENS_PWR_ON_ERR_INTR):
			dev_crit_ratelimited(device->dev,
				"GPMU: interrupt %s(%08lx)\n",
				gpmu_int_msg[i],
				BIT(i));
			break;
		}
}
/*
 * a5x_gpc_err_int_callback() - ISR for GPC error interrupts
 * @adreno_dev: Pointer to device
 * @bit: Interrupt bit
 */
static void a5x_gpc_err_int_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/*
	 * A GPC error is typically the result of a SW programming mistake.
	 * Force a GPU fault for this interrupt so that we can debug it
	 * with the help of a register dump.
	 */
	dev_crit(device->dev, "RBBM: GPC error\n");
	adreno_irqctrl(adreno_dev, 0);

	/* Trigger a fault in the dispatcher - this will effect a restart */
	adreno_dispatcher_fault(adreno_dev, ADRENO_SOFT_FAULT);
	adreno_dispatcher_schedule(device);
}

u64 a5xx_read_alwayson(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 lo = 0, hi = 0;

	kgsl_regread(device, A5XX_RBBM_ALWAYSON_COUNTER_LO, &lo);

	/* The upper 32 bits are only reliable on A540 targets */
	if (adreno_is_a540(adreno_dev))
		kgsl_regread(device, A5XX_RBBM_ALWAYSON_COUNTER_HI, &hi);

	return (((u64) hi) << 32) | lo;
}

static const struct adreno_irq_funcs a5xx_irq_funcs[32] = {
	ADRENO_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 1 - RBBM_AHB_ERROR */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 2 - RBBM_TRANSFER_TIMEOUT */
	/* 3 - RBBM_ME_MASTER_SPLIT_TIMEOUT */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback),
	/* 4 - RBBM_PFP_MASTER_SPLIT_TIMEOUT */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback),
	/* 5 - RBBM_ETS_MASTER_SPLIT_TIMEOUT */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback),
	/* 6 - RBBM_ATB_ASYNC_OVERFLOW */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback),
	ADRENO_IRQ_CALLBACK(a5x_gpc_err_int_callback), /* 7 - GPC_ERR */
	ADRENO_IRQ_CALLBACK(a5xx_preempt_callback), /* 8 - CP_SW */
	ADRENO_IRQ_CALLBACK(a5xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
	/* 10 - CP_CCU_FLUSH_DEPTH_TS */
	ADRENO_IRQ_CALLBACK(NULL),
	/* 11 - CP_CCU_FLUSH_COLOR_TS */
	ADRENO_IRQ_CALLBACK(NULL),
	/* 12 - CP_CCU_RESOLVE_TS */
	ADRENO_IRQ_CALLBACK(NULL),
	ADRENO_IRQ_CALLBACK(NULL), /* 13 - CP_IB2_INT */
	ADRENO_IRQ_CALLBACK(NULL), /* 14 - CP_IB1_INT */
	ADRENO_IRQ_CALLBACK(NULL), /* 15 - CP_RB_INT */
	/* 16 - CCP_UNUSED_1 */
	ADRENO_IRQ_CALLBACK(NULL),
	ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 18 - CP_WT_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL), /* 19 - UNKNOWN_1 */
	ADRENO_IRQ_CALLBACK(a5xx_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
	/* 21 - UNUSED_2 */
	ADRENO_IRQ_CALLBACK(NULL),
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 22 - RBBM_ATB_BUS_OVERFLOW */
	/* 23 - MISC_HANG_DETECT */
	ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 24 - UCHE_OOB_ACCESS */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 25 - UCHE_TRAP_INTR */
	ADRENO_IRQ_CALLBACK(NULL), /* 26 - DEBBUS_INTR_0 */
	ADRENO_IRQ_CALLBACK(NULL), /* 27 - DEBBUS_INTR_1 */
	ADRENO_IRQ_CALLBACK(a5xx_err_callback), /* 28 - GPMU_VOLTAGE_DROOP */
	ADRENO_IRQ_CALLBACK(a5xx_gpmu_int_callback), /* 29 - GPMU_FIRMWARE */
	ADRENO_IRQ_CALLBACK(NULL), /* 30 - ISDB_CPU_IRQ */
	ADRENO_IRQ_CALLBACK(NULL), /* 31 - ISDB_UNDER_DEBUG */
};
static irqreturn_t a5xx_irq_handler(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	irqreturn_t ret;
	u32 status;

	kgsl_regread(device, A5XX_RBBM_INT_0_STATUS, &status);

	/*
	 * Clear all the interrupt bits except A5XX_INT_RBBM_AHB_ERROR.
	 * The interrupt will stay asserted until it is cleared by the handler
	 * so don't touch it yet to avoid a storm
	 */
	kgsl_regwrite(device, A5XX_RBBM_INT_CLEAR_CMD,
		status & ~A5XX_INT_RBBM_AHB_ERROR);

	/* Call the helper function for callbacks */
	ret = adreno_irq_callbacks(adreno_dev, a5xx_irq_funcs, status);

	trace_kgsl_a5xx_irq_status(adreno_dev, status);

	/* Now clear AHB_ERROR if it was set */
	if (status & A5XX_INT_RBBM_AHB_ERROR)
		kgsl_regwrite(device, A5XX_RBBM_INT_CLEAR_CMD,
			A5XX_INT_RBBM_AHB_ERROR);

	return ret;
}

static bool a5xx_hw_isidle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 status;

	/*
	 * Due to CRC idle throttling the GPU idle hysteresis on a540 can take
	 * up to 5us to expire
	 */
	if (adreno_is_a540(adreno_dev))
		udelay(5);

	kgsl_regread(device, A5XX_RBBM_STATUS, &status);
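	/* Any bit other than bit 0 set in RBBM_STATUS means the GPU is busy */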
	if (status & 0xfffffffe)
		return false;

	kgsl_regread(device, A5XX_RBBM_INT_0_STATUS, &status);

	/* Return busy if an interrupt is pending */
	return !((status & adreno_dev->irq_mask) ||
		atomic_read(&adreno_dev->pending_irq_refcnt));
}

static int a5xx_clear_pending_transactions(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 mask = A5XX_VBIF_XIN_HALT_CTRL0_MASK;
	int ret;
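	/*
	 * Halt all VBIF XIN clients, wait for the halt acknowledgment and
	 * then release the halt before returning.
	 */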
	kgsl_regwrite(device, A5XX_VBIF_XIN_HALT_CTRL0, mask);
	ret = adreno_wait_for_halt_ack(device, A5XX_VBIF_XIN_HALT_CTRL1, mask);
	kgsl_regwrite(device, A5XX_VBIF_XIN_HALT_CTRL0, 0);

	return ret;
}

static bool a5xx_is_hw_collapsible(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int reg;

	if (!adreno_isidle(adreno_dev))
		return false;

	/* If the feature is not supported or not enabled there is no worry */
	if (!adreno_dev->sptp_pc_enabled)
		return true;
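	/*
	 * Bit 20 of the SP and RBCCU power/clock status registers appears to
	 * indicate the block is still powered; do not collapse while either
	 * one is set.
	 */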
	kgsl_regread(device, A5XX_GPMU_SP_PWR_CLK_STATUS, &reg);
	if (reg & BIT(20))
		return false;

	kgsl_regread(device, A5XX_GPMU_RBCCU_PWR_CLK_STATUS, &reg);
	return !(reg & BIT(20));
}

static void a5xx_remove(struct adreno_device *adreno_dev)
{
	if (adreno_preemption_feature_set(adreno_dev))
		del_timer(&adreno_dev->preempt.timer);
}

static void a5xx_power_stats(struct adreno_device *adreno_dev,
		struct kgsl_power_stats *stats)
{
	static u32 rbbm0_hi;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	s64 freq = kgsl_pwrctrl_active_freq(&device->pwrctrl) / 1000000;
	struct adreno_busy_data *busy = &adreno_dev->busy_data;
	s64 gpu_busy = 0;
	u32 lo, hi;
	s64 adj;

	/* Sometimes this counter can go backwards, so try to detect that */
	kgsl_regread(device, A5XX_RBBM_PERFCTR_RBBM_0_LO, &lo);
	kgsl_regread(device, A5XX_RBBM_PERFCTR_RBBM_0_HI, &hi);
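	/*
	 * If the low word went backwards while the high word did not move,
	 * treat the sample as bogus; if the high word advanced, the low word
	 * simply wrapped around.
	 */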
	if (busy->gpu_busy) {
		if (lo < busy->gpu_busy) {
			if (hi == rbbm0_hi) {
				dev_warn_once(device->dev,
					"abnormal value from RBBM_0 perfcounter: %x %x\n",
					lo, busy->gpu_busy);
				gpu_busy = 0;
			} else {
				gpu_busy = (UINT_MAX - busy->gpu_busy) + lo;
				rbbm0_hi = hi;
			}
		} else
			gpu_busy = lo - busy->gpu_busy;
	} else {
		gpu_busy = 0;
		rbbm0_hi = 0;
	}

	busy->gpu_busy = lo;

	adj = a5xx_read_throttling_counters(adreno_dev);
	if (-adj <= gpu_busy)
		gpu_busy += adj;
	else
		gpu_busy = 0;

	stats->busy_time = gpu_busy / freq;

	if (adreno_is_a530(adreno_dev) && adreno_dev->lm_threshold_count)
		kgsl_regread(device, adreno_dev->lm_threshold_count,
			&adreno_dev->lm_threshold_cross);
	else if (adreno_is_a540(adreno_dev))
		adreno_dev->lm_threshold_cross = adj;

	if (!device->pwrctrl.bus_control)
		return;

	stats->ram_time = counter_delta(device, adreno_dev->ram_cycles_lo,
		&busy->bif_ram_cycles);
	stats->ram_wait = counter_delta(device, adreno_dev->starved_ram_lo,
		&busy->bif_starved_ram);
}

static int a5xx_setproperty(struct kgsl_device_private *dev_priv,
		u32 type, void __user *value, u32 sizebytes)
{
	struct kgsl_device *device = dev_priv->device;
	u32 enable;

	if (type != KGSL_PROP_PWRCTRL)
		return -ENODEV;

	if (sizebytes != sizeof(enable))
		return -EINVAL;

	if (copy_from_user(&enable, value, sizeof(enable)))
		return -EFAULT;

	mutex_lock(&device->mutex);
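	/*
	 * A non-zero value hands control back to the power scaling code; zero
	 * forces the GPU on and keeps power scaling disabled.
	 */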
	if (enable) {
		device->pwrctrl.ctrl_flags = 0;
		kgsl_pwrscale_enable(device);
	} else {
		kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
		device->pwrctrl.ctrl_flags = KGSL_PWR_ON;
		kgsl_pwrscale_disable(device, true);
	}
	mutex_unlock(&device->mutex);

	return 0;
}

const struct adreno_gpudev adreno_a5xx_gpudev = {
	.reg_offsets = a5xx_register_offsets,
	.probe = a5xx_probe,
	.start = a5xx_start,
	.snapshot = a5xx_snapshot,
	.init = a5xx_init,
	.irq_handler = a5xx_irq_handler,
	.rb_start = a5xx_rb_start,
	.regulator_enable = a5xx_regulator_enable,
	.regulator_disable = a5xx_regulator_disable,
	.pwrlevel_change_settings = a5xx_pwrlevel_change_settings,
	.preemption_schedule = a5xx_preemption_schedule,
	.read_alwayson = a5xx_read_alwayson,
	.hw_isidle = a5xx_hw_isidle,
	.power_ops = &adreno_power_operations,
	.clear_pending_transactions = a5xx_clear_pending_transactions,
	.remove = a5xx_remove,
	.ringbuffer_submitcmd = a5xx_ringbuffer_submitcmd,
	.is_hw_collapsible = a5xx_is_hw_collapsible,
	.power_stats = a5xx_power_stats,
	.setproperty = a5xx_setproperty,
};