kgsl_util.c 8.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/clk.h>
  7. #include <linux/clk-provider.h>
  8. #include <linux/delay.h>
  9. #include <linux/device.h>
  10. #include <linux/firmware.h>
  11. #include <linux/ktime.h>
  12. #include <linux/of_address.h>
  13. #include <linux/qcom_scm.h>
  14. #include <linux/regulator/consumer.h>
  15. #include <linux/slab.h>
  16. #include <linux/soc/qcom/mdt_loader.h>
  17. #include <linux/string.h>
  18. #include <linux/version.h>
  19. #include <soc/qcom/minidump.h>
  20. #include "adreno.h"
  21. #include "kgsl_util.h"
  22. bool kgsl_regulator_disable_wait(struct regulator *reg, u32 timeout)
  23. {
  24. ktime_t tout = ktime_add_us(ktime_get(), timeout * 1000);
  25. if (IS_ERR_OR_NULL(reg))
  26. return true;
  27. regulator_disable(reg);
  28. for (;;) {
  29. if (!regulator_is_enabled(reg))
  30. return true;
  31. if (ktime_compare(ktime_get(), tout) > 0)
  32. return (!regulator_is_enabled(reg));
  33. usleep_range((100 >> 2) + 1, 100);
  34. }
  35. }
  36. struct clk *kgsl_of_clk_by_name(struct clk_bulk_data *clks, int count,
  37. const char *id)
  38. {
  39. int i;
  40. for (i = 0; clks && i < count; i++)
  41. if (!strcmp(clks[i].id, id))
  42. return clks[i].clk;
  43. return NULL;
  44. }
  45. int kgsl_regulator_set_voltage(struct device *dev,
  46. struct regulator *reg, u32 voltage)
  47. {
  48. int ret;
  49. if (IS_ERR_OR_NULL(reg))
  50. return 0;
  51. ret = regulator_set_voltage(reg, voltage, INT_MAX);
  52. if (ret)
  53. dev_err(dev, "Regulator set voltage:%d failed:%d\n", voltage, ret);
  54. return ret;
  55. }
  56. int kgsl_clk_set_rate(struct clk_bulk_data *clks, int num_clks,
  57. const char *id, unsigned long rate)
  58. {
  59. struct clk *clk;
  60. clk = kgsl_of_clk_by_name(clks, num_clks, id);
  61. if (!clk)
  62. return -ENODEV;
  63. return clk_set_rate(clk, rate);
  64. }
  65. #if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
  66. int kgsl_scm_gpu_init_regs(struct device *dev, u32 gpu_req)
  67. {
  68. int ret;
  69. if (!gpu_req)
  70. return -EOPNOTSUPP;
  71. ret = qcom_scm_kgsl_init_regs(gpu_req);
  72. if (ret)
  73. dev_err(dev, "Scm call for requests:0x%x failed with ret:: %d\n",
  74. gpu_req, ret);
  75. return ret;
  76. }
  77. #endif
  78. /*
  79. * The PASID has stayed consistent across all targets thus far so we are
  80. * cautiously optimistic that we can hard code it
  81. */
  82. #define GPU_PASID 13
/*
 * kgsl_zap_shader_load - Load and authenticate the GPU zap shader.
 * @dev: GPU device; its DT node must contain a "zap-shader" child with a
 *       "memory-region" phandle describing the reserved carveout
 * @name: firmware image name passed to request_firmware()
 *
 * Resolves the reserved memory region from the device tree, loads the
 * MDT-format firmware blob into it, and asks TZ (via SCM) to authenticate
 * the image and bring it out of reset under GPU_PASID.
 *
 * Return: 0 on success, negative errno on any failure.
 */
int kgsl_zap_shader_load(struct device *dev, const char *name)
{
	struct device_node *np, *mem_np;
	const struct firmware *fw;
	void *mem_region = NULL;
	phys_addr_t mem_phys;
	struct resource res;
	ssize_t mem_size;
	int ret;

	np = of_get_child_by_name(dev->of_node, "zap-shader");
	if (!np) {
		dev_err(dev, "zap-shader node not found. Please update the device tree\n");
		return -ENODEV;
	}

	/* The carveout the blob must land in is referenced by phandle */
	mem_np = of_parse_phandle(np, "memory-region", 0);
	of_node_put(np);
	if (!mem_np) {
		dev_err(dev, "Couldn't parse the mem-region from the zap-shader node\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(mem_np, 0, &res);
	of_node_put(mem_np);
	if (ret)
		return ret;

	ret = request_firmware(&fw, name, dev);
	if (ret) {
		dev_err(dev, "Couldn't load the firmware %s\n", name);
		return ret;
	}

	/* Total size the unpacked MDT image will occupy in memory */
	mem_size = qcom_mdt_get_size(fw);
	if (mem_size < 0) {
		ret = mem_size;
		goto out;
	}

	/* Refuse to load an image larger than the reserved carveout */
	if (mem_size > resource_size(&res)) {
		ret = -E2BIG;
		goto out;
	}

	mem_phys = res.start;

	/* Map the carveout write-combined so the CPU can copy the image in */
	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
	if (!mem_region) {
		ret = -ENOMEM;
		goto out;
	}

	ret = qcom_mdt_load(dev, fw, name, GPU_PASID, mem_region,
			mem_phys, mem_size, NULL);
	if (ret) {
		dev_err(dev, "Error %d while loading the MDT\n", ret);
		goto out;
	}

	/* Hand the loaded image to TZ for authentication and release */
	ret = qcom_scm_pas_auth_and_reset(GPU_PASID);

out:
	if (mem_region)
		memunmap(mem_region);

	release_firmware(fw);
	return ret;
}
  140. int kgsl_zap_shader_unload(struct device *dev)
  141. {
  142. int ret;
  143. ret = qcom_scm_pas_shutdown_retry(GPU_PASID);
  144. if (ret)
  145. dev_err(dev, "Error %d while PAS shutdown\n", ret);
  146. return ret;
  147. }
/*
 * kgsl_hwlock - Acquire the lock shared between the CPU and GPU ucode.
 * @lock: lock structure visible to both the CPU and the GPU
 *
 * Peterson-style two-party handshake: the CPU raises its request flag,
 * yields the turn to the GPU, then spins until the GPU either drops its
 * own request or hands the turn back. Explicit barriers order the flag
 * writes/reads against the GPU's view of this memory.
 *
 * Return: 0 when the lock was taken, -EBUSY if the GPU still held it
 * after roughly one second.
 */
int kgsl_hwlock(struct cpu_gpu_lock *lock)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	/* Indicate that the CPU wants the lock */
	lock->cpu_req = 1;

	/* post the request */
	wmb();

	/* Wait for our turn */
	lock->turn = 0;

	/* Finish all memory transactions before moving on */
	mb();

	/*
	 * Spin here while GPU ucode holds the lock, lock->gpu_req will
	 * be set to 0 after GPU ucode releases the lock. Maximum wait time
	 * is 1 second and this should be enough for GPU to release the lock.
	 */
	while (lock->gpu_req && lock->turn == 0) {
		cpu_relax();

		/* Get the latest updates from GPU */
		rmb();

		if (time_after(jiffies, timeout))
			break;
	}

	/* Timed out: the GPU never released the lock, back off */
	if (lock->gpu_req && lock->turn == 0)
		return -EBUSY;

	return 0;
}
/*
 * kgsl_hwunlock - Release the lock taken with kgsl_hwlock().
 * @lock: lock structure visible to both the CPU and the GPU
 *
 * Dropping cpu_req lets the GPU-side spin in its ucode proceed.
 */
void kgsl_hwunlock(struct cpu_gpu_lock *lock)
{
	/* Make sure all writes are done before releasing the lock */
	wmb();
	lock->cpu_req = 0;
}
  181. #if IS_ENABLED(CONFIG_QCOM_VA_MINIDUMP)
  182. void kgsl_add_to_minidump(char *name, u64 virt_addr, u64 phy_addr, size_t size)
  183. {
  184. struct md_region md_entry = {0};
  185. int ret;
  186. if (!msm_minidump_enabled())
  187. return;
  188. scnprintf(md_entry.name, sizeof(md_entry.name), name);
  189. md_entry.virt_addr = virt_addr;
  190. md_entry.phys_addr = phy_addr;
  191. md_entry.size = size;
  192. ret = msm_minidump_add_region(&md_entry);
  193. if (ret < 0 && ret != -EEXIST)
  194. pr_err("kgsl: Failed to register %s with minidump:%d\n", name, ret);
  195. }
  196. void kgsl_remove_from_minidump(char *name, u64 virt_addr, u64 phy_addr, size_t size)
  197. {
  198. struct md_region md_entry = {0};
  199. int ret;
  200. if (!msm_minidump_enabled())
  201. return;
  202. scnprintf(md_entry.name, sizeof(md_entry.name), name);
  203. md_entry.virt_addr = virt_addr;
  204. md_entry.phys_addr = phy_addr;
  205. md_entry.size = size;
  206. ret = msm_minidump_remove_region(&md_entry);
  207. if (ret < 0 && ret != -ENOENT)
  208. pr_err("kgsl: Failed to remove %s from minidump\n", name);
  209. }
  210. int kgsl_add_va_to_minidump(struct device *dev, const char *name, void *ptr,
  211. size_t size)
  212. {
  213. struct va_md_entry entry = {0};
  214. int ret;
  215. scnprintf(entry.owner, sizeof(entry.owner), name);
  216. entry.vaddr = (u64)(ptr);
  217. entry.size = size;
  218. ret = qcom_va_md_add_region(&entry);
  219. if (ret < 0)
  220. dev_err(dev, "Failed to register %s with va_minidump: %d\n", name,
  221. ret);
  222. return ret;
  223. }
/*
 * kgsl_add_driver_data_to_va_minidump - Register core KGSL driver state
 * with the VA minidump so it is captured on a crash.
 * @device: KGSL device whose state should be dumped
 *
 * Registers the global driver struct, the scratch and memstore buffers,
 * and one entry each for every active draw context, open process, and
 * pagetable. Stops at the first registration failure.
 *
 * Return: 0 on success, or the first failing registration's errno.
 */
static int kgsl_add_driver_data_to_va_minidump(struct kgsl_device *device)
{
	int ret;
	char name[MAX_VA_MINIDUMP_STR_LEN];
	struct kgsl_pagetable *pt;
	struct adreno_context *ctxt;
	struct kgsl_process_private *p;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	ret = kgsl_add_va_to_minidump(device->dev, KGSL_DRIVER,
			(void *)(&kgsl_driver), sizeof(struct kgsl_driver));
	if (ret)
		return ret;

	/* hwsched path may not have scratch entry */
	if (device->scratch) {
		ret = kgsl_add_va_to_minidump(device->dev, KGSL_SCRATCH_ENTRY,
				device->scratch->hostptr, device->scratch->size);
		if (ret)
			return ret;
	}

	ret = kgsl_add_va_to_minidump(device->dev, KGSL_MEMSTORE_ENTRY,
			device->memstore->hostptr, device->memstore->size);
	if (ret)
		return ret;

	/* One entry per active draw context, keyed by context id */
	spin_lock(&adreno_dev->active_list_lock);
	list_for_each_entry(ctxt, &adreno_dev->active_list, active_node) {
		snprintf(name, sizeof(name), KGSL_ADRENO_CTX_ENTRY"_%d", ctxt->base.id);
		ret = kgsl_add_va_to_minidump(device->dev, name,
				(void *)(ctxt), sizeof(struct adreno_context));
		if (ret)
			break;
	}
	spin_unlock(&adreno_dev->active_list_lock);

	/* One entry per process with the GPU open, keyed by pid */
	read_lock(&kgsl_driver.proclist_lock);
	list_for_each_entry(p, &kgsl_driver.process_list, list) {
		snprintf(name, sizeof(name), KGSL_PROC_PRIV_ENTRY "_%d", pid_nr(p->pid));
		ret = kgsl_add_va_to_minidump(device->dev, name,
				(void *)(p), sizeof(struct kgsl_process_private));
		if (ret)
			break;
	}
	read_unlock(&kgsl_driver.proclist_lock);

	/* One entry per pagetable, keyed by pagetable name */
	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		snprintf(name, sizeof(name), KGSL_PGTABLE_ENTRY"_%d", pt->name);
		ret = kgsl_add_va_to_minidump(device->dev, name,
				(void *)(pt), sizeof(struct kgsl_pagetable));
		if (ret)
			break;
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ret;
}
  276. static int kgsl_va_minidump_callback(struct notifier_block *nb,
  277. unsigned long action, void *unused)
  278. {
  279. struct adreno_device *adreno_dev = ADRENO_DEVICE(kgsl_driver.devp[0]);
  280. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  281. if (kgsl_add_driver_data_to_va_minidump(kgsl_driver.devp[0]))
  282. return NOTIFY_BAD;
  283. if (gpudev->add_to_va_minidump(adreno_dev))
  284. return NOTIFY_BAD;
  285. return NOTIFY_OK;
  286. }
/* Run first (highest priority) among VA minidump notifier callbacks */
static struct notifier_block kgsl_va_minidump_nb = {
	.priority = INT_MAX,
	.notifier_call = kgsl_va_minidump_callback,
};
  291. void kgsl_qcom_va_md_register(struct kgsl_device *device)
  292. {
  293. int ret;
  294. if (!qcom_va_md_enabled())
  295. return;
  296. ret = qcom_va_md_register("KGSL", &kgsl_va_minidump_nb);
  297. if (ret)
  298. dev_err(device->dev, "Failed to register notifier with va_minidump: %d\n", ret);
  299. }
  300. void kgsl_qcom_va_md_unregister(struct kgsl_device *device)
  301. {
  302. int ret;
  303. if (!qcom_va_md_enabled())
  304. return;
  305. ret = qcom_va_md_unregister("KGSL", &kgsl_va_minidump_nb);
  306. if (ret)
  307. dev_err(device->dev, "Failed to unregister notifier with va_minidump: %d\n", ret);
  308. }
  309. #endif