// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/sysfs.h>

#include "adreno.h"
#include "adreno_sysfs.h"
#include "kgsl_sysfs.h"
  10. static ssize_t _gpu_model_show(struct kgsl_device *device, char *buf)
  11. {
  12. return scnprintf(buf, PAGE_SIZE, adreno_get_gpu_model(device));
  13. }
  14. static ssize_t gpu_model_show(struct device *dev,
  15. struct device_attribute *attr, char *buf)
  16. {
  17. struct kgsl_device *device = dev_get_drvdata(dev);
  18. return _gpu_model_show(device, buf);
  19. }
  20. static int _l3_vote_store(struct adreno_device *adreno_dev, bool val)
  21. {
  22. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  23. if (ADRENO_FEATURE(adreno_dev, ADRENO_L3_VOTE))
  24. device->l3_vote = val;
  25. return 0;
  26. }
  27. static bool _l3_vote_show(struct adreno_device *adreno_dev)
  28. {
  29. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  30. return device->l3_vote;
  31. }
  32. static int _ft_policy_store(struct adreno_device *adreno_dev,
  33. unsigned int val)
  34. {
  35. adreno_dev->ft_policy = val & KGSL_FT_POLICY_MASK;
  36. return 0;
  37. }
  38. static unsigned int _ft_policy_show(struct adreno_device *adreno_dev)
  39. {
  40. return adreno_dev->ft_policy;
  41. }
  42. static int _ft_pagefault_policy_store(struct adreno_device *adreno_dev,
  43. unsigned int val)
  44. {
  45. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  46. int ret = 0;
  47. mutex_lock(&device->mutex);
  48. val &= KGSL_FT_PAGEFAULT_MASK;
  49. if (device->state == KGSL_STATE_ACTIVE)
  50. ret = kgsl_mmu_set_pagefault_policy(&device->mmu,
  51. (unsigned long) val);
  52. if (ret == 0)
  53. device->mmu.pfpolicy = val;
  54. mutex_unlock(&device->mutex);
  55. return 0;
  56. }
  57. static unsigned int _ft_pagefault_policy_show(struct adreno_device *adreno_dev)
  58. {
  59. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  60. return device->mmu.pfpolicy;
  61. }
  62. static int _rt_bus_hint_store(struct adreno_device *adreno_dev, u32 val)
  63. {
  64. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  65. struct kgsl_pwrctrl *pwrctrl = &device->pwrctrl;
  66. if (val > pwrctrl->pwrlevels[0].bus_max)
  67. return -EINVAL;
  68. adreno_power_cycle_u32(adreno_dev, &pwrctrl->rt_bus_hint, val);
  69. return 0;
  70. }
  71. static u32 _rt_bus_hint_show(struct adreno_device *adreno_dev)
  72. {
  73. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  74. return device->pwrctrl.rt_bus_hint;
  75. }
  76. static int _gpu_llc_slice_enable_store(struct adreno_device *adreno_dev,
  77. bool val)
  78. {
  79. if (IS_ERR_OR_NULL(adreno_dev->gpu_llc_slice) ||
  80. (adreno_dev->gpu_llc_slice_enable == val))
  81. return 0;
  82. return adreno_power_cycle_bool(adreno_dev, &adreno_dev->gpu_llc_slice_enable, val);
  83. }
  84. static bool _gpu_llc_slice_enable_show(struct adreno_device *adreno_dev)
  85. {
  86. return adreno_dev->gpu_llc_slice_enable;
  87. }
  88. static int _gpuhtw_llc_slice_enable_store(struct adreno_device *adreno_dev,
  89. bool val)
  90. {
  91. if (IS_ERR_OR_NULL(adreno_dev->gpuhtw_llc_slice) ||
  92. (adreno_dev->gpuhtw_llc_slice_enable == val))
  93. return 0;
  94. return adreno_power_cycle_bool(adreno_dev, &adreno_dev->gpuhtw_llc_slice_enable, val);
  95. }
  96. static bool _gpuhtw_llc_slice_enable_show(struct adreno_device *adreno_dev)
  97. {
  98. return adreno_dev->gpuhtw_llc_slice_enable;
  99. }
  100. static bool _ft_hang_intr_status_show(struct adreno_device *adreno_dev)
  101. {
  102. /* Hang interrupt is always on on all targets */
  103. return true;
  104. }
  105. static int _hwcg_store(struct adreno_device *adreno_dev, bool val)
  106. {
  107. if (adreno_dev->hwcg_enabled == val)
  108. return 0;
  109. return adreno_power_cycle_bool(adreno_dev, &adreno_dev->hwcg_enabled,
  110. val);
  111. }
  112. static bool _hwcg_show(struct adreno_device *adreno_dev)
  113. {
  114. return adreno_dev->hwcg_enabled;
  115. }
  116. static int _throttling_store(struct adreno_device *adreno_dev, bool val)
  117. {
  118. if (!adreno_is_a540(adreno_dev) ||
  119. adreno_dev->throttling_enabled == val)
  120. return 0;
  121. return adreno_power_cycle_bool(adreno_dev,
  122. &adreno_dev->throttling_enabled, val);
  123. }
  124. static bool _throttling_show(struct adreno_device *adreno_dev)
  125. {
  126. return adreno_dev->throttling_enabled;
  127. }
  128. static int _sptp_pc_store(struct adreno_device *adreno_dev, bool val)
  129. {
  130. if (!ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC) ||
  131. adreno_dev->sptp_pc_enabled == val)
  132. return 0;
  133. return adreno_power_cycle_bool(adreno_dev, &adreno_dev->sptp_pc_enabled,
  134. val);
  135. }
  136. static bool _sptp_pc_show(struct adreno_device *adreno_dev)
  137. {
  138. return adreno_dev->sptp_pc_enabled;
  139. }
  140. static int _lm_store(struct adreno_device *adreno_dev, bool val)
  141. {
  142. if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
  143. adreno_dev->lm_enabled == val)
  144. return 0;
  145. return adreno_power_cycle_bool(adreno_dev, &adreno_dev->lm_enabled,
  146. val);
  147. }
  148. static bool _lm_show(struct adreno_device *adreno_dev)
  149. {
  150. return adreno_dev->lm_enabled;
  151. }
  152. static int _ifpc_store(struct adreno_device *adreno_dev, bool val)
  153. {
  154. return gmu_core_dev_ifpc_store(KGSL_DEVICE(adreno_dev), val);
  155. }
  156. static bool _ifpc_show(struct adreno_device *adreno_dev)
  157. {
  158. return gmu_core_dev_ifpc_isenabled(KGSL_DEVICE(adreno_dev));
  159. }
  160. static int _touch_wake_store(struct adreno_device *adreno_dev, bool val)
  161. {
  162. if (val)
  163. adreno_touch_wake(KGSL_DEVICE(adreno_dev));
  164. return 0;
  165. }
  166. static bool _touch_wake_show(struct adreno_device *adreno_dev)
  167. {
  168. return false;
  169. }
  170. static unsigned int _ifpc_count_show(struct adreno_device *adreno_dev)
  171. {
  172. return adreno_dev->ifpc_count;
  173. }
  174. static bool _acd_show(struct adreno_device *adreno_dev)
  175. {
  176. return adreno_dev->acd_enabled;
  177. }
  178. static int _acd_store(struct adreno_device *adreno_dev, bool val)
  179. {
  180. return gmu_core_dev_acd_set(KGSL_DEVICE(adreno_dev), val);
  181. }
  182. static bool _gmu_ab_show(struct adreno_device *adreno_dev)
  183. {
  184. return adreno_dev->gmu_ab;
  185. }
  186. static int _gmu_ab_store(struct adreno_device *adreno_dev, bool val)
  187. {
  188. if (!test_bit(ADRENO_DEVICE_GMU_AB, &adreno_dev->priv) ||
  189. (adreno_dev->gmu_ab == val))
  190. return 0;
  191. /* Power cycle the GPU for changes to take effect */
  192. return adreno_power_cycle_bool(adreno_dev, &adreno_dev->gmu_ab, val);
  193. }
  194. static bool _bcl_show(struct adreno_device *adreno_dev)
  195. {
  196. return adreno_dev->bcl_enabled;
  197. }
  198. static int _bcl_store(struct adreno_device *adreno_dev, bool val)
  199. {
  200. if (!ADRENO_FEATURE(adreno_dev, ADRENO_BCL) ||
  201. adreno_dev->bcl_enabled == val)
  202. return 0;
  203. return adreno_power_cycle_bool(adreno_dev, &adreno_dev->bcl_enabled,
  204. val);
  205. }
  206. static bool _clx_show(struct adreno_device *adreno_dev)
  207. {
  208. return adreno_dev->clx_enabled;
  209. }
  210. static int _clx_store(struct adreno_device *adreno_dev, bool val)
  211. {
  212. if (!ADRENO_FEATURE(adreno_dev, ADRENO_CLX) || adreno_dev->clx_enabled == val)
  213. return 0;
  214. return adreno_power_cycle_bool(adreno_dev, &adreno_dev->clx_enabled, val);
  215. }
  216. static bool _dms_show(struct adreno_device *adreno_dev)
  217. {
  218. return adreno_dev->dms_enabled;
  219. }
  220. static int _dms_store(struct adreno_device *adreno_dev, bool val)
  221. {
  222. if (!test_bit(ADRENO_DEVICE_DMS, &adreno_dev->priv) ||
  223. adreno_dev->dms_enabled == val)
  224. return 0;
  225. return adreno_power_cycle_bool(adreno_dev, &adreno_dev->dms_enabled, val);
  226. }
  227. static bool _perfcounter_show(struct adreno_device *adreno_dev)
  228. {
  229. return adreno_dev->perfcounter;
  230. }
  231. static int _perfcounter_store(struct adreno_device *adreno_dev, bool val)
  232. {
  233. if (adreno_dev->perfcounter == val)
  234. return 0;
  235. return adreno_power_cycle_bool(adreno_dev, &adreno_dev->perfcounter, val);
  236. }
  237. static bool _lpac_show(struct adreno_device *adreno_dev)
  238. {
  239. return adreno_dev->lpac_enabled;
  240. }
  241. static int _lpac_store(struct adreno_device *adreno_dev, bool val)
  242. {
  243. const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
  244. if (gpudev->lpac_store)
  245. return gpudev->lpac_store(adreno_dev, val);
  246. else
  247. return -EINVAL;
  248. }
  249. ssize_t adreno_sysfs_store_u32(struct device *dev,
  250. struct device_attribute *attr, const char *buf, size_t count)
  251. {
  252. struct adreno_device *adreno_dev = ADRENO_DEVICE(dev_get_drvdata(dev));
  253. const struct adreno_sysfs_attribute_u32 *_attr =
  254. container_of(attr, struct adreno_sysfs_attribute_u32, attr);
  255. u32 val;
  256. int ret;
  257. ret = kstrtou32(buf, 0, &val);
  258. if (ret)
  259. return ret;
  260. ret = _attr->store(adreno_dev, val);
  261. if (ret)
  262. return ret;
  263. return count;
  264. }
  265. ssize_t adreno_sysfs_show_u32(struct device *dev,
  266. struct device_attribute *attr, char *buf)
  267. {
  268. struct adreno_device *adreno_dev = ADRENO_DEVICE(dev_get_drvdata(dev));
  269. const struct adreno_sysfs_attribute_u32 *_attr =
  270. container_of(attr, struct adreno_sysfs_attribute_u32, attr);
  271. return scnprintf(buf, PAGE_SIZE, "0x%X\n", _attr->show(adreno_dev));
  272. }
  273. ssize_t adreno_sysfs_store_bool(struct device *dev,
  274. struct device_attribute *attr, const char *buf, size_t count)
  275. {
  276. struct adreno_device *adreno_dev = ADRENO_DEVICE(dev_get_drvdata(dev));
  277. const struct adreno_sysfs_attribute_bool *_attr =
  278. container_of(attr, struct adreno_sysfs_attribute_bool, attr);
  279. bool val;
  280. int ret;
  281. ret = kstrtobool(buf, &val);
  282. if (ret)
  283. return ret;
  284. ret = _attr->store(adreno_dev, val);
  285. if (ret)
  286. return ret;
  287. return count;
  288. }
  289. ssize_t adreno_sysfs_show_bool(struct device *dev,
  290. struct device_attribute *attr, char *buf)
  291. {
  292. struct adreno_device *adreno_dev = ADRENO_DEVICE(dev_get_drvdata(dev));
  293. const struct adreno_sysfs_attribute_bool *_attr =
  294. container_of(attr, struct adreno_sysfs_attribute_bool, attr);
  295. return scnprintf(buf, PAGE_SIZE, "%d\n", _attr->show(adreno_dev));
  296. }
  297. static ADRENO_SYSFS_U32(ft_policy);
  298. static ADRENO_SYSFS_U32(ft_pagefault_policy);
  299. static ADRENO_SYSFS_U32(rt_bus_hint);
  300. static ADRENO_SYSFS_RO_BOOL(ft_hang_intr_status);
  301. static ADRENO_SYSFS_BOOL(gpu_llc_slice_enable);
  302. static ADRENO_SYSFS_BOOL(gpuhtw_llc_slice_enable);
  303. static DEVICE_INT_ATTR(wake_nice, 0644, adreno_wake_nice);
  304. static DEVICE_INT_ATTR(wake_timeout, 0644, adreno_wake_timeout);
  305. static ADRENO_SYSFS_BOOL(sptp_pc);
  306. static ADRENO_SYSFS_BOOL(lm);
  307. static ADRENO_SYSFS_BOOL(hwcg);
  308. static ADRENO_SYSFS_BOOL(throttling);
  309. static ADRENO_SYSFS_BOOL(ifpc);
  310. static ADRENO_SYSFS_RO_U32(ifpc_count);
  311. static ADRENO_SYSFS_BOOL(acd);
  312. static ADRENO_SYSFS_BOOL(bcl);
  313. static ADRENO_SYSFS_BOOL(clx);
  314. static ADRENO_SYSFS_BOOL(l3_vote);
  315. static ADRENO_SYSFS_BOOL(perfcounter);
  316. static ADRENO_SYSFS_BOOL(lpac);
  317. static ADRENO_SYSFS_BOOL(dms);
  318. static ADRENO_SYSFS_BOOL(touch_wake);
  319. static ADRENO_SYSFS_BOOL(gmu_ab);
  320. static DEVICE_ATTR_RO(gpu_model);
  321. static const struct attribute *_attr_list[] = {
  322. &adreno_attr_ft_policy.attr.attr,
  323. &adreno_attr_ft_pagefault_policy.attr.attr,
  324. &adreno_attr_rt_bus_hint.attr.attr,
  325. &adreno_attr_ft_hang_intr_status.attr.attr,
  326. &dev_attr_wake_nice.attr.attr,
  327. &dev_attr_wake_timeout.attr.attr,
  328. &adreno_attr_sptp_pc.attr.attr,
  329. &adreno_attr_lm.attr.attr,
  330. &adreno_attr_hwcg.attr.attr,
  331. &adreno_attr_throttling.attr.attr,
  332. &adreno_attr_gpu_llc_slice_enable.attr.attr,
  333. &adreno_attr_gpuhtw_llc_slice_enable.attr.attr,
  334. &adreno_attr_ifpc.attr.attr,
  335. &adreno_attr_ifpc_count.attr.attr,
  336. &adreno_attr_acd.attr.attr,
  337. &adreno_attr_bcl.attr.attr,
  338. &dev_attr_gpu_model.attr,
  339. &adreno_attr_l3_vote.attr.attr,
  340. &adreno_attr_perfcounter.attr.attr,
  341. &adreno_attr_lpac.attr.attr,
  342. &adreno_attr_dms.attr.attr,
  343. &adreno_attr_touch_wake.attr.attr,
  344. &adreno_attr_gmu_ab.attr.attr,
  345. &adreno_attr_clx.attr.attr,
  346. NULL,
  347. };
  348. static GPU_SYSFS_ATTR(gpu_model, 0444, _gpu_model_show, NULL);
  349. /**
  350. * adreno_sysfs_init() - Initialize adreno sysfs files
  351. * @adreno_dev: Pointer to the adreno device
  352. *
  353. * Initialize many of the adreno specific sysfs files especially for fault
  354. * tolerance and power control
  355. */
  356. int adreno_sysfs_init(struct adreno_device *adreno_dev)
  357. {
  358. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  359. int ret;
  360. ret = sysfs_create_files(&device->dev->kobj, _attr_list);
  361. if (!ret) {
  362. /* Notify userspace */
  363. kobject_uevent(&device->dev->kobj, KOBJ_ADD);
  364. ret = sysfs_create_file(&device->gpu_sysfs_kobj,
  365. &gpu_sysfs_attr_gpu_model.attr);
  366. }
  367. return ret;
  368. }