adreno_a6xx_rgmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/clk-provider.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regulator/consumer.h>

#include "adreno.h"
#include "adreno_a6xx.h"
#include "adreno_a6xx_rgmu.h"
#include "adreno_snapshot.h"
#include "kgsl_bus.h"
#include "kgsl_trace.h"
#include "kgsl_util.h"

#define RGMU_CLK_FREQ 200000000

/* RGMU timeouts */
#define RGMU_IDLE_TIMEOUT 100 /* ms */
#define RGMU_START_TIMEOUT 100 /* ms */
#define GPU_START_TIMEOUT 100 /* ms */
#define GLM_SLEEP_TIMEOUT 10 /* ms */

static const unsigned int a6xx_rgmu_registers[] = {
	/* GMU CX */
	0x1F80F, 0x1F83D, 0x1F840, 0x1F8D8, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9CC,
	/* GMU AO */
	0x23B03, 0x23B16, 0x23B80, 0x23B82,
	/* GPU CC */
	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
	0x26000, 0x26002,
};
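
/* Resolve the RGMU sub-device embedded in the enclosing a6xx_device */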
static struct a6xx_rgmu_device *to_a6xx_rgmu(struct adreno_device *adreno_dev)
{
	struct a6xx_device *a6xx_dev = container_of(adreno_dev,
			struct a6xx_device, adreno_dev);

	return &a6xx_dev->rgmu;
}
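
/*
 * Drop one reference on the KGSL active count. When the count reaches zero,
 * update the power stats and arm the idle timer so the device can idle off.
 */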
static void a6xx_rgmu_active_count_put(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (WARN_ON(!mutex_is_locked(&device->mutex)))
		return;

	if (WARN(atomic_read(&device->active_cnt) == 0,
		"Unbalanced get/put calls to KGSL active count\n"))
		return;

	if (atomic_dec_and_test(&device->active_cnt)) {
		kgsl_pwrscale_update_stats(device);
		kgsl_pwrscale_update(device);
		kgsl_start_idle_timer(device);
	}

	trace_kgsl_active_count(device,
		(unsigned long) __builtin_return_address(0));

	wake_up(&device->active_cnt_wq);
}

static irqreturn_t a6xx_rgmu_irq_handler(int irq, void *data)
{
	struct kgsl_device *device = data;
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(ADRENO_DEVICE(device));
	unsigned int status = 0;

	gmu_core_regread(device, A6XX_GMU_AO_HOST_INTERRUPT_STATUS, &status);

	if (status & RGMU_AO_IRQ_FENCE_ERR) {
		unsigned int fence_status;

		gmu_core_regread(device, A6XX_GMU_AHB_FENCE_STATUS,
			&fence_status);
		gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_CLR,
			status);
		dev_err_ratelimited(&rgmu->pdev->dev,
			"FENCE error interrupt received %x\n", fence_status);
	}

	if (status & ~RGMU_AO_IRQ_MASK)
		dev_err_ratelimited(&rgmu->pdev->dev,
			"Unhandled RGMU interrupts 0x%lx\n",
			status & ~RGMU_AO_IRQ_MASK);

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_oob_irq_handler(int irq, void *data)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	unsigned int status = 0;

	gmu_core_regread(device, A6XX_GMU_GMU2HOST_INTR_INFO, &status);

	if (status & RGMU_OOB_IRQ_ERR_MSG) {
		gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, status);
		dev_err_ratelimited(&rgmu->pdev->dev,
			"RGMU oob irq error\n");
		adreno_dispatcher_fault(adreno_dev, ADRENO_GMU_FAULT);
	}

	if (status & ~RGMU_OOB_IRQ_MASK)
		dev_err_ratelimited(&rgmu->pdev->dev,
			"Unhandled OOB interrupts 0x%lx\n",
			status & ~RGMU_OOB_IRQ_MASK);

	return IRQ_HANDLED;
}

static const char *oob_to_str(enum oob_request req)
{
	if (req == oob_gpu)
		return "oob_gpu";
	else if (req == oob_perfcntr)
		return "oob_perfcntr";
	return "unknown";
}
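
/*
 * OOB handshake: the host requests a slot by setting bit (req + 16) in
 * HOST2GMU_INTR_SET, waits for the RGMU to reflect the same bit in
 * GMU2HOST_INTR_INFO, and later releases the slot via bit (req + 24).
 */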

/*
 * a6xx_rgmu_oob_set() - Set OOB interrupt to RGMU
 * @device: Pointer to the KGSL device
 * @req: Which of the OOB bits to request
 */
static int a6xx_rgmu_oob_set(struct kgsl_device *device,
		enum oob_request req)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(ADRENO_DEVICE(device));
	int ret, set, check;

	if (req == oob_perfcntr && rgmu->num_oob_perfcntr++)
		return 0;

	set = BIT(req + 16);
	check = BIT(req + 16);

	gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, set);

	ret = gmu_core_timed_poll_check(device,
			A6XX_GMU_GMU2HOST_INTR_INFO,
			check,
			GPU_START_TIMEOUT,
			check);

	if (ret) {
		unsigned int status;

		if (req == oob_perfcntr)
			rgmu->num_oob_perfcntr--;
		gmu_core_regread(device, A6XX_RGMU_CX_PCC_DEBUG, &status);
		dev_err(&rgmu->pdev->dev,
			"Timed out while setting OOB req:%s status:0x%x\n",
			oob_to_str(req), status);
		gmu_core_fault_snapshot(device);
		return ret;
	}

	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, check);

	trace_kgsl_gmu_oob_set(set);

	return 0;
}

/*
 * a6xx_rgmu_oob_clear() - Clear a previously set OOB request
 * @device: Pointer to the KGSL device that has the RGMU
 * @req: Which of the OOB bits to clear
 */
static void a6xx_rgmu_oob_clear(struct kgsl_device *device,
		enum oob_request req)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(ADRENO_DEVICE(device));

	if (req == oob_perfcntr && --rgmu->num_oob_perfcntr)
		return;

	gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, BIT(req + 24));

	trace_kgsl_gmu_oob_clear(BIT(req + 24));
}

static void a6xx_rgmu_bcl_config(struct kgsl_device *device, bool on)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(ADRENO_DEVICE(device));

	if (on) {
		/* Enable BCL CRC HW i/f */
		gmu_core_regwrite(device,
			A6XX_GMU_AO_RGMU_GLM_HW_CRC_DISABLE, 0);
	} else {
		/* Disable CRC HW i/f */
		gmu_core_regwrite(device,
			A6XX_GMU_AO_RGMU_GLM_HW_CRC_DISABLE, 1);

		/* Wait for HW CRC disable ACK */
		if (gmu_core_timed_poll_check(device,
			A6XX_GMU_AO_RGMU_GLM_SLEEP_STATUS,
			BIT(1), GLM_SLEEP_TIMEOUT, BIT(1)))
			dev_err_ratelimited(&rgmu->pdev->dev,
				"Timed out waiting for HW CRC disable acknowledgment\n");

		/* Pull the valid bit RGMU_GLM_SLEEP_CTRL[7] down to 0 */
		gmu_core_regrmw(device, A6XX_GMU_AO_RGMU_GLM_SLEEP_CTRL,
			BIT(7), 0);
	}
}

static void a6xx_rgmu_irq_enable(struct adreno_device *adreno_dev)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/* Clear pending IRQs and unmask needed IRQs */
	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, 0xffffffff);
	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_CLR, 0xffffffff);

	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK,
		~((unsigned int)RGMU_OOB_IRQ_MASK));
	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_MASK,
		(unsigned int)~RGMU_AO_IRQ_MASK);

	/* Enable all IRQs on host */
	enable_irq(rgmu->oob_interrupt_num);
	enable_irq(rgmu->rgmu_interrupt_num);
}

static void a6xx_rgmu_irq_disable(struct adreno_device *adreno_dev)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/* Disable all IRQs on host */
	disable_irq(rgmu->rgmu_interrupt_num);
	disable_irq(rgmu->oob_interrupt_num);

	/* Mask all IRQs and clear pending IRQs */
	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK, 0xffffffff);
	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_MASK, 0xffffffff);
	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, 0xffffffff);
	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_CLR, 0xffffffff);
}

static int a6xx_rgmu_ifpc_store(struct kgsl_device *device,
		unsigned int val)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	unsigned int requested_idle_level;

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
		return -EINVAL;

	if (val)
		requested_idle_level = GPU_HW_IFPC;
	else
		requested_idle_level = GPU_HW_ACTIVE;

	if (requested_idle_level == rgmu->idle_level)
		return 0;

	/* Power cycle the GPU for changes to take effect */
	return adreno_power_cycle_u32(adreno_dev, &rgmu->idle_level,
		requested_idle_level);
}

static unsigned int a6xx_rgmu_ifpc_isenabled(struct kgsl_device *device)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(ADRENO_DEVICE(device));

	return rgmu->idle_level == GPU_HW_IFPC;
}

static void a6xx_rgmu_prepare_stop(struct kgsl_device *device)
{
	/* Turn off GX_MEM retention */
	kgsl_regwrite(device, A6XX_RBBM_BLOCK_GX_RETENTION_CNTL, 0);
}
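
/* Bit 6 of A6XX_GMU_SPTPRAC_PWR_CLK_STATUS is set when the GX GDSC is collapsed */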
#define GX_GDSC_POWER_OFF BIT(6)

bool a6xx_rgmu_gx_is_on(struct adreno_device *adreno_dev)
{
	unsigned int val;

	gmu_core_regread(KGSL_DEVICE(adreno_dev),
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);

	return !(val & GX_GDSC_POWER_OFF);
}

static int a6xx_rgmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	unsigned int reg[10] = {0};
	unsigned long t;
	uint64_t ts1, ts2, ts3;

	if (rgmu->idle_level != GPU_HW_IFPC)
		return 0;

	ts1 = a6xx_read_alwayson(adreno_dev);

	/* FIXME: readl_poll_timeout? */
	t = jiffies + msecs_to_jiffies(RGMU_IDLE_TIMEOUT);
	do {
		gmu_core_regread(device,
			A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &reg[0]);

		if (reg[0] & GX_GDSC_POWER_OFF)
			return 0;

		/* Wait 10us to reduce unnecessary AHB bus traffic */
		usleep_range(10, 100);
	} while (!time_after(jiffies, t));

	ts2 = a6xx_read_alwayson(adreno_dev);

	/* Do one last read in case it succeeds */
	gmu_core_regread(device,
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &reg[0]);

	if (reg[0] & GX_GDSC_POWER_OFF)
		return 0;

	ts3 = a6xx_read_alwayson(adreno_dev);

	/* Collect abort data to help with debugging */
	gmu_core_regread(device, A6XX_RGMU_CX_PCC_DEBUG, &reg[1]);
	gmu_core_regread(device, A6XX_RGMU_CX_PCC_STATUS, &reg[2]);
	gmu_core_regread(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg[3]);
	kgsl_regread(device, A6XX_CP_STATUS_1, &reg[4]);
	gmu_core_regread(device, A6XX_GMU_RBBM_INT_UNMASKED_STATUS, &reg[5]);
	gmu_core_regread(device, A6XX_GMU_GMU_PWR_COL_KEEPALIVE, &reg[6]);
	kgsl_regread(device, A6XX_CP_CP2GMU_STATUS, &reg[7]);
	kgsl_regread(device, A6XX_CP_CONTEXT_SWITCH_CNTL, &reg[8]);
	gmu_core_regread(device, A6XX_GMU_AO_SPARE_CNTL, &reg[9]);

	dev_err(&rgmu->pdev->dev,
		"----------------------[ RGMU error ]----------------------\n");
	dev_err(&rgmu->pdev->dev, "Timeout waiting for lowest idle level\n");
	dev_err(&rgmu->pdev->dev,
		"Timestamps: %llx %llx %llx\n", ts1, ts2, ts3);
	dev_err(&rgmu->pdev->dev,
		"SPTPRAC_PWR_CLK_STATUS=%x PCC_DEBUG=%x PCC_STATUS=%x\n",
		reg[0], reg[1], reg[2]);
	dev_err(&rgmu->pdev->dev,
		"CX_BUSY_STATUS=%x CP_STATUS_1=%x\n", reg[3], reg[4]);
	dev_err(&rgmu->pdev->dev,
		"RBBM_INT_UNMASKED_STATUS=%x PWR_COL_KEEPALIVE=%x\n",
		reg[5], reg[6]);
	dev_err(&rgmu->pdev->dev,
		"CP2GMU_STATUS=%x CONTEXT_SWITCH_CNTL=%x AO_SPARE_CNTL=%x\n",
		reg[7], reg[8], reg[9]);

	WARN_ON(1);
	gmu_core_fault_snapshot(device);
	return -ETIMEDOUT;
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles
 * for main hysteresis. This is the first hysteresis. Here we set it
 * to 0x1680 cycles, or 300 us. The highest 16 bits of this value are
 * the number of XO clock cycles for short hysteresis. This happens
 * after main hysteresis. Here we set it to 0xA cycles, or 0.5 us.
 */
#define A6X_RGMU_LONG_IFPC_HYST FIELD_PREP(GENMASK(15, 0), 0x1680)
#define A6X_RGMU_SHORT_IFPC_HYST FIELD_PREP(GENMASK(31, 16), 0xA)

/* Minimum IFPC timer (200usec) allowed to override default value */
#define A6X_RGMU_LONG_IFPC_HYST_FLOOR FIELD_PREP(GENMASK(15, 0), 0x0F00)

/* HOSTTOGMU and TIMER0/1 interrupt mask: 0x20060 */
#define RGMU_INTR_EN_MASK (BIT(5) | BIT(6) | BIT(17))

/* RGMU FENCE RANGE MASK */
#define RGMU_FENCE_RANGE_MASK ((0x1 << 31) | ((0xA << 2) << 18) | (0x8A0))

static int a6xx_rgmu_fw_start(struct adreno_device *adreno_dev,
		unsigned int boot_state)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	unsigned int status;
	int i;

	switch (boot_state) {
	case GMU_COLD_BOOT:
	case GMU_WARM_BOOT:
		/* Turn on TCM retention */
		gmu_core_regwrite(device, A6XX_GMU_GENERAL_7, 1);

		/* Load RGMU FW image via AHB bus */
		for (i = 0; i < rgmu->fw_size; i++)
			gmu_core_regwrite(device, A6XX_GMU_CM3_ITCM_START + i,
				rgmu->fw_hostptr[i]);
		break;
	}

	/* IFPC feature enable */
	if (rgmu->idle_level == GPU_HW_IFPC) {
		gmu_core_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			A6X_RGMU_SHORT_IFPC_HYST | adreno_dev->ifpc_hyst);
		gmu_core_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
			BIT(0));
	}

	/* For RGMU CX interrupt */
	gmu_core_regwrite(device, A6XX_RGMU_CX_INTR_GEN_EN, RGMU_INTR_EN_MASK);

	/* Enable GMU AO to host interrupt */
	gmu_core_regwrite(device, A6XX_GMU_AO_INTERRUPT_EN, RGMU_AO_IRQ_MASK);

	/* For OOB */
	gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_EN_2, 0x00FF0000);
	gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_EN_3, 0xFF000000);

	/* Fence address range configuration */
	gmu_core_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
		RGMU_FENCE_RANGE_MASK);

	/*
	 * During IFPC the RGMU puts the fence into drop mode, so we need to
	 * put the fence back into allow mode during the slumber-exit sequence.
	 */
	gmu_core_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);

	/* BCL ON sequence */
	a6xx_rgmu_bcl_config(device, true);

	/* Write 0 first to make sure that the RGMU is held in reset */
	gmu_core_regwrite(device, A6XX_RGMU_CX_PCC_CTRL, 0);

	/* Make sure putting it in reset doesn't happen after writing 1 */
	wmb();

	/* Bring the RGMU out of reset */
	gmu_core_regwrite(device, A6XX_RGMU_CX_PCC_CTRL, 1);

	if (gmu_core_timed_poll_check(device, A6XX_RGMU_CX_PCC_INIT_RESULT,
			BIT(0), RGMU_START_TIMEOUT, BIT(0))) {
		gmu_core_regread(device, A6XX_RGMU_CX_PCC_DEBUG, &status);
		dev_err(&rgmu->pdev->dev,
			"rgmu boot Failed. status:%08x\n", status);
		gmu_core_fault_snapshot(device);
		return -ETIMEDOUT;
	}

	/* Read the RGMU firmware version from registers */
	gmu_core_regread(device, A6XX_GMU_GENERAL_0, &rgmu->ver);

	return 0;
}

static void a6xx_rgmu_notify_slumber(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/* Disable the power counter so that the RGMU is not busy */
	gmu_core_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* BCL OFF sequence */
	a6xx_rgmu_bcl_config(device, false);
}

static void a6xx_rgmu_disable_clks(struct adreno_device *adreno_dev)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int ret;

	/* Check whether the GX GDSC is still on */
	if (a6xx_rgmu_gx_is_on(adreno_dev)) {
		if (IS_ERR_OR_NULL(pwr->gx_gdsc))
			return;

		/*
		 * Switch gx gdsc control from RGMU to CPU. Force non-zero
		 * reference count in clk driver so next disable call will
		 * turn off the GDSC.
		 */
		ret = regulator_enable(pwr->gx_gdsc);
		if (ret)
			dev_err(&rgmu->pdev->dev,
				"Fail to enable gx gdsc:%d\n", ret);

		ret = regulator_disable(pwr->gx_gdsc);
		if (ret)
			dev_err(&rgmu->pdev->dev,
				"Fail to disable gx gdsc:%d\n", ret);

		if (a6xx_rgmu_gx_is_on(adreno_dev))
			dev_err(&rgmu->pdev->dev, "gx is stuck on\n");
	}

	clk_bulk_disable_unprepare(rgmu->num_clks, rgmu->clks);
}

void a6xx_rgmu_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);

	adreno_snapshot_registers(device, snapshot, a6xx_rgmu_registers,
		ARRAY_SIZE(a6xx_rgmu_registers) / 2);

	a6xx_snapshot(adreno_dev, snapshot);

	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, 0xffffffff);
	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK,
		RGMU_OOB_IRQ_MASK);

	if (device->gmu_fault)
		rgmu->fault_count++;
}

static void a6xx_rgmu_suspend(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	a6xx_rgmu_irq_disable(adreno_dev);

	a6xx_rgmu_disable_clks(adreno_dev);

	kgsl_pwrctrl_disable_cx_gdsc(device);

	kgsl_pwrctrl_set_state(KGSL_DEVICE(adreno_dev), KGSL_STATE_NONE);
}

static int a6xx_rgmu_enable_clks(struct adreno_device *adreno_dev)
{
	int ret;
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	ret = clk_set_rate(rgmu->rgmu_clk, RGMU_CLK_FREQ);
	if (ret) {
		dev_err(&rgmu->pdev->dev, "Couldn't set the RGMU clock\n");
		return ret;
	}

	ret = clk_set_rate(rgmu->gpu_clk,
		pwr->pwrlevels[pwr->default_pwrlevel].gpu_freq);
	if (ret) {
		dev_err(&rgmu->pdev->dev, "Couldn't set the GPU clock\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(rgmu->num_clks, rgmu->clks);
	if (ret) {
		dev_err(&rgmu->pdev->dev, "Failed to enable RGMU clocks\n");
		return ret;
	}

	device->state = KGSL_STATE_AWARE;

	return 0;
}

/*
 * a6xx_rgmu_load_firmware() - Load the ucode into the RGMU TCM
 * @adreno_dev: Pointer to adreno device
 */
static int a6xx_rgmu_load_firmware(struct adreno_device *adreno_dev)
{
	const struct firmware *fw = NULL;
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
	int ret;

	/* RGMU fw already saved and verified so do nothing new */
	if (rgmu->fw_hostptr)
		return 0;

	ret = request_firmware(&fw, a6xx_core->gmufw_name, &rgmu->pdev->dev);
	if (ret < 0) {
		dev_err(&rgmu->pdev->dev, "request_firmware (%s) failed: %d\n",
			a6xx_core->gmufw_name, ret);
		return ret;
	}

	rgmu->fw_hostptr = devm_kmemdup(&rgmu->pdev->dev, fw->data,
		fw->size, GFP_KERNEL);

	if (rgmu->fw_hostptr)
		rgmu->fw_size = (fw->size / sizeof(u32));

	release_firmware(fw);

	return rgmu->fw_hostptr ? 0 : -ENOMEM;
}

/* Halt RGMU execution */
static void a6xx_rgmu_halt_execution(struct kgsl_device *device, bool force)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(ADRENO_DEVICE(device));
	unsigned int index, status, fence;

	if (!device->gmu_fault)
		return;

	/* Mask so there's no interrupt caused by NMI */
	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);

	/* Make sure the interrupt is masked */
	wmb();

	gmu_core_regread(device, A6XX_RGMU_CX_PCC_DEBUG, &index);
	gmu_core_regread(device, A6XX_RGMU_CX_PCC_STATUS, &status);
	gmu_core_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &fence);

	dev_err(&rgmu->pdev->dev,
		"RGMU Fault PCC_DEBUG:0x%x PCC_STATUS:0x%x FENCE_CTRL:0x%x\n",
		index, status, fence);

	/*
	 * Write 0 to halt RGMU execution. We halt it on a GMU/GPU fault and
	 * restart PCC execution in the recovery path.
	 */
	gmu_core_regwrite(device, A6XX_RGMU_CX_PCC_CTRL, 0);

	/*
	 * Ensure that the fence is in allow mode after halting the RGMU.
	 * After halting the RGMU we dump the snapshot.
	 */
	gmu_core_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
}

static void halt_gbif_arb(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/* Halt all AXI requests */
	kgsl_regwrite(device, A6XX_GBIF_HALT, A6XX_GBIF_ARB_HALT_MASK);
	adreno_wait_for_halt_ack(device, A6XX_GBIF_HALT_ACK,
		A6XX_GBIF_ARB_HALT_MASK);

	/* De-assert the halts */
	kgsl_regwrite(device, A6XX_GBIF_HALT, 0x0);
}

/* Caller shall ensure GPU is ready for SLUMBER */
static void a6xx_rgmu_power_off(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	kgsl_pwrctrl_axi(device, false);

	if (device->gmu_fault)
		return a6xx_rgmu_suspend(adreno_dev);

	/* Wait for the lowest idle level we requested */
	ret = a6xx_rgmu_wait_for_lowest_idle(adreno_dev);
	if (ret)
		return a6xx_rgmu_suspend(adreno_dev);

	a6xx_rgmu_notify_slumber(adreno_dev);

	/* Halt CX traffic and de-assert halt */
	halt_gbif_arb(adreno_dev);

	a6xx_rgmu_irq_disable(adreno_dev);

	a6xx_rgmu_disable_clks(adreno_dev);

	kgsl_pwrctrl_disable_cx_gdsc(device);

	kgsl_pwrctrl_clear_l3_vote(device);

	kgsl_pwrctrl_set_state(device, KGSL_STATE_NONE);
}

static int a6xx_rgmu_clock_set(struct adreno_device *adreno_dev,
		u32 pwrlevel)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	int ret;
	unsigned long rate;

	if (pwrlevel == INVALID_DCVS_IDX)
		return -EINVAL;

	rate = device->pwrctrl.pwrlevels[pwrlevel].gpu_freq;

	ret = clk_set_rate(rgmu->gpu_clk, rate);
	if (ret)
		dev_err(&rgmu->pdev->dev, "Couldn't set the GPU clock\n");

	return ret;
}

static int a6xx_gpu_boot(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	adreno_set_active_ctxs_null(adreno_dev);

	ret = kgsl_mmu_start(device);
	if (ret)
		goto err;

	ret = a6xx_rgmu_oob_set(device, oob_gpu);
	if (ret)
		goto err_oob_clear;

	/* Clear the busy_data stats - we're starting over from scratch */
	memset(&adreno_dev->busy_data, 0, sizeof(adreno_dev->busy_data));

	/* Restore performance counter registers with saved values */
	adreno_perfcounter_restore(adreno_dev);

	a6xx_start(adreno_dev);

	/* Re-initialize the coresight registers if applicable */
	adreno_coresight_start(adreno_dev);

	adreno_perfcounter_start(adreno_dev);

	/* Clear FSR here in case it is set from a previous pagefault */
	kgsl_mmu_clear_fsr(&device->mmu);

	a6xx_enable_gpu_irq(adreno_dev);

	ret = a6xx_rb_start(adreno_dev);
	if (ret) {
		a6xx_disable_gpu_irq(adreno_dev);
		goto err_oob_clear;
	}

	/*
	 * At this point it is safe to assume that we recovered. Setting
	 * this field allows us to take a new snapshot for the next failure
	 * if we are prioritizing the first unrecoverable snapshot.
	 */
	if (device->snapshot)
		device->snapshot->recovered = true;

	/* Start the dispatcher */
	adreno_dispatcher_start(device);

	device->reset_counter++;

	a6xx_rgmu_oob_clear(device, oob_gpu);

	return 0;

err_oob_clear:
	a6xx_rgmu_oob_clear(device, oob_gpu);

err:
	a6xx_rgmu_power_off(adreno_dev);

	return ret;
}

static int a6xx_rgmu_boot(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_AWARE);

	ret = kgsl_pwrctrl_enable_cx_gdsc(device);
	if (ret)
		return ret;

	ret = a6xx_rgmu_enable_clks(adreno_dev);
	if (ret) {
		kgsl_pwrctrl_disable_cx_gdsc(device);
		return ret;
	}

	a6xx_rgmu_irq_enable(adreno_dev);

	/* Clear any GPU faults that might have been left over */
	adreno_clear_gpu_fault(adreno_dev);

	ret = a6xx_rgmu_fw_start(adreno_dev, GMU_COLD_BOOT);
	if (ret)
		goto err;

	/* Request default DCVS level */
	ret = kgsl_pwrctrl_set_default_gpu_pwrlevel(device);
	if (ret)
		goto err;

	ret = kgsl_pwrctrl_axi(device, true);
	if (ret)
		goto err;

	device->gmu_fault = false;

	kgsl_pwrctrl_set_state(device, KGSL_STATE_AWARE);

	return 0;

err:
	a6xx_rgmu_power_off(adreno_dev);

	return ret;
}

static int a6xx_power_off(struct adreno_device *adreno_dev);
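
/*
 * Idle worker: once the active count drops to zero and no submission is
 * pending, take the GPU down via a6xx_power_off(); otherwise re-arm the
 * idle timer and try again later.
 */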
static void rgmu_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work,
			struct kgsl_device, idle_check_ws);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret;

	mutex_lock(&device->mutex);

	if (test_bit(GMU_DISABLE_SLUMBER, &device->gmu_core.flags))
		goto done;

	if (atomic_read(&device->active_cnt) || time_is_after_jiffies(device->idle_jiffies)) {
		kgsl_pwrscale_update(device);
		kgsl_start_idle_timer(device);
		goto done;
	}

	spin_lock(&device->submit_lock);

	if (device->submit_now) {
		spin_unlock(&device->submit_lock);
		kgsl_pwrscale_update(device);
		kgsl_start_idle_timer(device);
		goto done;
	}

	device->skip_inline_submit = true;
	spin_unlock(&device->submit_lock);

	ret = a6xx_power_off(adreno_dev);
	if (ret == -EBUSY) {
		kgsl_pwrscale_update(device);
		kgsl_start_idle_timer(device);
	}

done:
	mutex_unlock(&device->mutex);
}

static void rgmu_idle_timer(struct timer_list *t)
{
	struct kgsl_device *device = container_of(t, struct kgsl_device,
			idle_timer);

	kgsl_schedule_work(&device->idle_check_ws);
}

static int a6xx_boot(struct adreno_device *adreno_dev)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	if (test_bit(RGMU_PRIV_GPU_STARTED, &rgmu->flags))
		return 0;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);

	ret = a6xx_rgmu_boot(adreno_dev);
	if (ret)
		return ret;

	ret = a6xx_gpu_boot(adreno_dev);
	if (ret)
		return ret;

	kgsl_start_idle_timer(device);
	kgsl_pwrscale_wake(device);

	set_bit(RGMU_PRIV_GPU_STARTED, &rgmu->flags);

	device->pwrctrl.last_stat_updated = ktime_get();

	kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);

	return 0;
}

static void a6xx_rgmu_touch_wakeup(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	int ret;

	/*
	 * Do not wake up a suspended device or until the first boot sequence
	 * has been completed.
	 */
	if (test_bit(RGMU_PRIV_PM_SUSPEND, &rgmu->flags) ||
		!test_bit(RGMU_PRIV_FIRST_BOOT_DONE, &rgmu->flags))
		return;

	if (test_bit(RGMU_PRIV_GPU_STARTED, &rgmu->flags))
		goto done;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);

	ret = a6xx_rgmu_boot(adreno_dev);
	if (ret)
		return;

	ret = a6xx_gpu_boot(adreno_dev);
	if (ret)
		return;

	kgsl_pwrscale_wake(device);

	set_bit(RGMU_PRIV_GPU_STARTED, &rgmu->flags);

	device->pwrctrl.last_stat_updated = ktime_get();

	kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);

done:
	/*
	 * When waking up from a touch event we want to stay active long enough
	 * for the user to send a draw command. The default idle timer timeout
	 * is shorter than we want so go ahead and push the idle timer out
	 * further for this special case.
	 */
	mod_timer(&device->idle_timer, jiffies +
		msecs_to_jiffies(adreno_wake_timeout));
}

static int a6xx_first_boot(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	int ret;

	if (test_bit(RGMU_PRIV_FIRST_BOOT_DONE, &rgmu->flags))
		return a6xx_boot(adreno_dev);

	ret = a6xx_ringbuffer_init(adreno_dev);
	if (ret)
		return ret;

	ret = a6xx_microcode_read(adreno_dev);
	if (ret)
		return ret;

	ret = a6xx_init(adreno_dev);
	if (ret)
		return ret;

	ret = a6xx_rgmu_load_firmware(adreno_dev);
	if (ret)
		return ret;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);

	ret = a6xx_rgmu_boot(adreno_dev);
	if (ret)
		return ret;

	ret = a6xx_gpu_boot(adreno_dev);
	if (ret)
		return ret;

	adreno_get_bus_counters(adreno_dev);
	adreno_create_profile_buffer(adreno_dev);

	set_bit(RGMU_PRIV_FIRST_BOOT_DONE, &rgmu->flags);
	set_bit(RGMU_PRIV_GPU_STARTED, &rgmu->flags);

	/*
	 * There is a possible deadlock scenario during kgsl firmware reading
	 * (request_firmware) and devfreq update calls. During first boot, the
	 * kgsl device mutex is held and then request_firmware is called for
	 * reading firmware. request_firmware internally takes the
	 * dev_pm_qos_mtx lock. Whereas in case of devfreq update calls
	 * triggered by thermal/bcl or devfreq sysfs, it first takes the same
	 * dev_pm_qos_mtx lock and then tries to take the kgsl device mutex as
	 * part of get_dev_status/target calls. This results in a deadlock when
	 * both threads are unable to acquire the mutex held by the other
	 * thread. Enable devfreq updates now as we are done reading all
	 * firmware files.
	 */
	device->pwrscale.devfreq_enabled = true;

	device->pwrctrl.last_stat_updated = ktime_get();

	kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);

	return 0;
}

static int a6xx_rgmu_first_open(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	ret = a6xx_first_boot(adreno_dev);
	if (ret)
		return ret;

	/*
	 * A client that does a first_open but never closes the device
	 * may prevent us from going back to SLUMBER. So trigger the idle
	 * check by incrementing the active count and immediately releasing it.
	 */
	atomic_inc(&device->active_cnt);
	a6xx_rgmu_active_count_put(adreno_dev);

	return 0;
}
static int a6xx_power_off(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	int ret;

	adreno_suspend_context(device);

	/*
	 * adreno_suspend_context() unlocks the device mutex, which
	 * could allow a concurrent thread to attempt SLUMBER sequence.
	 * Hence, check the flags before proceeding with SLUMBER.
	 */
	if (!test_bit(RGMU_PRIV_GPU_STARTED, &rgmu->flags))
		return 0;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);

	ret = a6xx_rgmu_oob_set(device, oob_gpu);
	if (ret) {
		a6xx_rgmu_oob_clear(device, oob_gpu);
		goto no_gx_power;
	}

	if (a6xx_irq_pending(adreno_dev)) {
		a6xx_rgmu_oob_clear(device, oob_gpu);
		return -EBUSY;
	}

	kgsl_pwrscale_update_stats(device);

	/* Save active coresight registers if applicable */
	adreno_coresight_stop(adreno_dev);

	/* Save physical performance counter values before GPU power down */
	adreno_perfcounter_save(adreno_dev);

	adreno_irqctrl(adreno_dev, 0);

	a6xx_rgmu_prepare_stop(device);

	a6xx_rgmu_oob_clear(device, oob_gpu);

no_gx_power:
	/* Halt all gx traffic */
	kgsl_regwrite(device, A6XX_GBIF_HALT, A6XX_GBIF_CLIENT_HALT_MASK);
	adreno_wait_for_halt_ack(device, A6XX_GBIF_HALT_ACK,
		A6XX_GBIF_CLIENT_HALT_MASK);

	kgsl_pwrctrl_irq(device, false);

	a6xx_rgmu_power_off(adreno_dev);

	adreno_set_active_ctxs_null(adreno_dev);

	adreno_dispatcher_stop(adreno_dev);

	adreno_ringbuffer_stop(adreno_dev);

	adreno_llcc_slice_deactivate(adreno_dev);

	clear_bit(RGMU_PRIV_GPU_STARTED, &rgmu->flags);

	del_timer_sync(&device->idle_timer);

	kgsl_pwrscale_sleep(device);

	kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);

	return ret;
}

int a6xx_rgmu_reset(struct adreno_device *adreno_dev)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);

	a6xx_disable_gpu_irq(adreno_dev);

	/* Hard reset the rgmu and gpu */
	a6xx_rgmu_suspend(adreno_dev);

	a6xx_reset_preempt_records(adreno_dev);

	adreno_llcc_slice_deactivate(adreno_dev);

	clear_bit(RGMU_PRIV_GPU_STARTED, &rgmu->flags);

	/* Attempt rebooting the rgmu and gpu */
	return a6xx_boot(adreno_dev);
}

static int a6xx_rgmu_active_count_get(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	int ret = 0;

	if (WARN_ON(!mutex_is_locked(&device->mutex)))
		return -EINVAL;

	if (test_bit(RGMU_PRIV_PM_SUSPEND, &rgmu->flags))
		return -EINVAL;

	if (atomic_read(&device->active_cnt) == 0)
		ret = a6xx_boot(adreno_dev);

	if (ret == 0) {
		atomic_inc(&device->active_cnt);
		trace_kgsl_active_count(device,
			(unsigned long) __builtin_return_address(0));
	}

	return ret;
}

static int a6xx_rgmu_pm_suspend(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	int ret;

	if (test_bit(RGMU_PRIV_PM_SUSPEND, &rgmu->flags))
		return 0;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);

	/* Halt any new submissions */
	reinit_completion(&device->halt_gate);

	/* Wait for the active count so the device can be put in slumber */
	ret = kgsl_active_count_wait(device, 0, HZ);
	if (ret) {
		dev_err(device->dev,
			"Timed out waiting for the active count\n");
		goto err;
	}

	ret = adreno_idle(device);
	if (ret)
		goto err;

	a6xx_power_off(adreno_dev);

	set_bit(RGMU_PRIV_PM_SUSPEND, &rgmu->flags);

	adreno_get_gpu_halt(adreno_dev);

	kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);

	return 0;

err:
	adreno_dispatcher_start(device);
	return ret;
}

static void a6xx_rgmu_pm_resume(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);

	if (WARN(!test_bit(RGMU_PRIV_PM_SUSPEND, &rgmu->flags),
		"resume invoked without a suspend\n"))
		return;

	adreno_put_gpu_halt(adreno_dev);

	clear_bit(RGMU_PRIV_PM_SUSPEND, &rgmu->flags);

	adreno_dispatcher_start(device);
}

static const struct gmu_dev_ops a6xx_rgmudev = {
	.oob_set = a6xx_rgmu_oob_set,
	.oob_clear = a6xx_rgmu_oob_clear,
	.ifpc_store = a6xx_rgmu_ifpc_store,
	.ifpc_isenabled = a6xx_rgmu_ifpc_isenabled,
	.send_nmi = a6xx_rgmu_halt_execution,
};
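
/* Look up and request the "kgsl_oob" and "kgsl_rgmu" interrupt lines */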
static int a6xx_rgmu_irq_probe(struct kgsl_device *device)
{
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(ADRENO_DEVICE(device));
	int ret;

	ret = kgsl_request_irq(rgmu->pdev, "kgsl_oob",
		a6xx_oob_irq_handler, device);
	if (ret < 0)
		return ret;

	rgmu->oob_interrupt_num = ret;

	ret = kgsl_request_irq(rgmu->pdev,
		"kgsl_rgmu", a6xx_rgmu_irq_handler, device);
	if (ret < 0)
		return ret;

	rgmu->rgmu_interrupt_num = ret;
	return 0;
}

static int a6xx_rgmu_clocks_probe(struct a6xx_rgmu_device *rgmu,
		struct device_node *node)
{
	int ret, i;

	ret = devm_clk_bulk_get_all(&rgmu->pdev->dev, &rgmu->clks);
	if (ret < 0)
		return ret;

	/*
	 * Voting for apb_pclk will enable power and clocks required for
	 * QDSS path to function. However, if QCOM_KGSL_QDSS_STM is not enabled,
	 * QDSS is essentially unusable. Hence, if QDSS cannot be used,
	 * don't vote for this clock.
	 */
	if (!IS_ENABLED(CONFIG_QCOM_KGSL_QDSS_STM)) {
		for (i = 0; i < ret; i++) {
			if (!strcmp(rgmu->clks[i].id, "apb_pclk")) {
				rgmu->clks[i].clk = NULL;
				break;
			}
		}
	}

	rgmu->num_clks = ret;

	rgmu->gpu_clk = kgsl_of_clk_by_name(rgmu->clks, ret, "core");
	if (!rgmu->gpu_clk) {
		dev_err(&rgmu->pdev->dev, "The GPU clock isn't defined\n");
		return -ENODEV;
	}

	rgmu->rgmu_clk = kgsl_of_clk_by_name(rgmu->clks, ret, "gmu");
	if (!rgmu->rgmu_clk) {
		dev_err(&rgmu->pdev->dev, "The RGMU clock isn't defined\n");
		return -ENODEV;
	}

	return 0;
}

const struct adreno_power_ops a6xx_rgmu_power_ops = {
	.first_open = a6xx_rgmu_first_open,
	.last_close = a6xx_power_off,
	.active_count_get = a6xx_rgmu_active_count_get,
	.active_count_put = a6xx_rgmu_active_count_put,
	.pm_suspend = a6xx_rgmu_pm_suspend,
	.pm_resume = a6xx_rgmu_pm_resume,
	.touch_wakeup = a6xx_rgmu_touch_wakeup,
	.gpu_clock_set = a6xx_rgmu_clock_set,
};

int a6xx_rgmu_device_probe(struct platform_device *pdev,
		u32 chipid, const struct adreno_gpu_core *gpucore)
{
	struct adreno_device *adreno_dev;
	struct kgsl_device *device;
	struct a6xx_device *a6xx_dev;
	int ret;

	a6xx_dev = devm_kzalloc(&pdev->dev, sizeof(*a6xx_dev),
		GFP_KERNEL);
	if (!a6xx_dev)
		return -ENOMEM;

	adreno_dev = &a6xx_dev->adreno_dev;

	adreno_dev->irq_mask = A6XX_INT_MASK;

	ret = a6xx_probe_common(pdev, adreno_dev, chipid, gpucore);
	if (ret)
		return ret;

	ret = adreno_dispatcher_init(adreno_dev);
	if (ret)
		return ret;

	device = KGSL_DEVICE(adreno_dev);

	INIT_WORK(&device->idle_check_ws, rgmu_idle_check);

	timer_setup(&device->idle_timer, rgmu_idle_timer, 0);

	return 0;
}

int a6xx_rgmu_add_to_minidump(struct adreno_device *adreno_dev)
{
	struct a6xx_device *a6xx_dev = container_of(adreno_dev,
			struct a6xx_device, adreno_dev);

	return kgsl_add_va_to_minidump(adreno_dev->dev.dev, KGSL_A6XX_DEVICE,
		(void *)(a6xx_dev), sizeof(struct a6xx_device));
}

/* Do not access any RGMU registers in RGMU probe function */
static int a6xx_rgmu_probe(struct kgsl_device *device,
		struct platform_device *pdev)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct a6xx_rgmu_device *rgmu = to_a6xx_rgmu(adreno_dev);
	int ret;

	rgmu->pdev = pdev;

	/* Set up RGMU regulators */
	ret = kgsl_pwrctrl_probe_regulators(device, pdev);
	if (ret)
		return ret;

	/* Set up RGMU clocks */
	ret = a6xx_rgmu_clocks_probe(rgmu, pdev->dev.of_node);
	if (ret)
		return ret;

	ret = kgsl_regmap_add_region(&device->regmap, pdev,
		"kgsl_rgmu", NULL, NULL);
	if (ret) {
		dev_err(&pdev->dev, "Unable to map the RGMU registers\n");
		return ret;
	}

	/* Initialize OOB and RGMU interrupts */
	ret = a6xx_rgmu_irq_probe(device);
	if (ret)
		return ret;

	/* Set up RGMU idle states */
	if (ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_IFPC)) {
		rgmu->idle_level = GPU_HW_IFPC;
		adreno_dev->ifpc_hyst = A6X_RGMU_LONG_IFPC_HYST;
		adreno_dev->ifpc_hyst_floor = A6X_RGMU_LONG_IFPC_HYST_FLOOR;
	} else {
		rgmu->idle_level = GPU_HW_ACTIVE;
	}

	set_bit(GMU_ENABLED, &device->gmu_core.flags);
	device->gmu_core.dev_ops = &a6xx_rgmudev;

	return 0;
}

static void a6xx_rgmu_remove(struct kgsl_device *device)
{
	memset(&device->gmu_core, 0, sizeof(device->gmu_core));
}

static int a6xx_rgmu_bind(struct device *dev, struct device *master, void *data)
{
	struct kgsl_device *device = dev_get_drvdata(master);

	return a6xx_rgmu_probe(device, to_platform_device(dev));
}

static void a6xx_rgmu_unbind(struct device *dev, struct device *master,
		void *data)
{
	struct kgsl_device *device = dev_get_drvdata(master);

	a6xx_rgmu_remove(device);
}

static const struct component_ops a6xx_rgmu_component_ops = {
	.bind = a6xx_rgmu_bind,
	.unbind = a6xx_rgmu_unbind,
};

static int a6xx_rgmu_probe_dev(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &a6xx_rgmu_component_ops);
}

static int a6xx_rgmu_remove_dev(struct platform_device *pdev)
{
	component_del(&pdev->dev, &a6xx_rgmu_component_ops);
	return 0;
}

static const struct of_device_id a6xx_rgmu_match_table[] = {
	{ .compatible = "qcom,gpu-rgmu" },
	{ },
};

struct platform_driver a6xx_rgmu_driver = {
	.probe = a6xx_rgmu_probe_dev,
	.remove = a6xx_rgmu_remove_dev,
	.driver = {
		.name = "kgsl-rgmu",
		.of_match_table = a6xx_rgmu_match_table,
	},
};