// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2002,2008-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/debugfs.h>

#include "adreno.h"

extern struct dentry *kgsl_debugfs_dir;

static void set_isdb(struct adreno_device *adreno_dev, void *priv)
{
	set_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv);
}

static int _isdb_set(void *data, u64 val)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* Once ISDB is enabled it stays enabled */
	if (test_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv))
		return 0;

	/*
	 * Bring down the GPU so we can bring it back up with the correct
	 * power and clock settings
	 */
	return adreno_power_cycle(adreno_dev, set_isdb, NULL);
}

static int _isdb_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	*val = (u64) test_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_isdb_fops, _isdb_get, _isdb_set, "%llu\n");
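
/*
 * The simple knobs in this file all follow this pattern:
 * DEFINE_DEBUGFS_ATTRIBUTE() wraps a get/set pair in debugfs attribute fops
 * that parse and print the value with the "%llu\n" format. A usage sketch
 * from userspace (the device directory name is platform-dependent; kgsl-3d0
 * is assumed here):
 *
 *   echo 1 > /sys/kernel/debug/kgsl/kgsl-3d0/isdb
 *   cat /sys/kernel/debug/kgsl/kgsl-3d0/isdb
 */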

static int _ctxt_record_size_set(void *data, u64 val)
{
	struct kgsl_device *device = data;

	device->snapshot_ctxt_record_size = val;
	return 0;
}

static int _ctxt_record_size_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;

	*val = device->snapshot_ctxt_record_size;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_ctxt_record_size_fops, _ctxt_record_size_get,
	_ctxt_record_size_set, "%llu\n");

static int _lm_limit_set(void *data, u64 val)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
		return 0;

	/* Clamp the value (in mA) to the 3A..10A range */
	if (val > 10000)
		val = 10000;
	else if (val < 3000)
		val = 3000;

	if (adreno_dev->lm_enabled)
		return adreno_power_cycle_u32(adreno_dev,
			&adreno_dev->lm_limit, val);

	return 0;
}

static int _lm_limit_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* Report zero, and skip the stale field, when LM isn't supported */
	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM)) {
		*val = 0;
		return 0;
	}

	*val = (u64) adreno_dev->lm_limit;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_lm_limit_fops, _lm_limit_get,
	_lm_limit_set, "%llu\n");
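
/*
 * Usage sketch (path assumed): values are in mA and clamped to the
 * 3000..10000 range by _lm_limit_set(); the new limit is only applied,
 * via a power cycle, while limits management is enabled:
 *
 *   echo 5000 > /sys/kernel/debug/kgsl/kgsl-3d0/lm_limit    # stored as 5000
 *   echo 20000 > /sys/kernel/debug/kgsl/kgsl-3d0/lm_limit   # clamped to 10000
 */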

static int _lm_threshold_count_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
		*val = 0;
	else
		*val = (u64) adreno_dev->lm_threshold_cross;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_lm_threshold_fops, _lm_threshold_count_get,
	NULL, "%llu\n");

static int _active_count_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	unsigned int i = atomic_read(&device->active_cnt);

	*val = (u64) i;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_active_count_fops, _active_count_get, NULL, "%llu\n");

static int _coop_reset_set(void *data, u64 val)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (ADRENO_FEATURE(adreno_dev, ADRENO_COOP_RESET))
		adreno_dev->cooperative_reset = val ? true : false;

	return 0;
}

static int _coop_reset_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	*val = (u64) adreno_dev->cooperative_reset;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_coop_reset_fops, _coop_reset_get,
	_coop_reset_set, "%llu\n");

static void set_gpu_client_pf(struct adreno_device *adreno_dev, void *priv)
{
	adreno_dev->uche_client_pf = *((u32 *)priv);
	adreno_dev->patch_reglist = false;
}

static int _gpu_client_pf_set(void *data, u64 val)
{
	struct kgsl_device *device = data;
	/* The callback reads a u32, so don't hand it the raw u64 */
	u32 client_pf = val;

	return adreno_power_cycle(ADRENO_DEVICE(device), set_gpu_client_pf,
		&client_pf);
}

static int _gpu_client_pf_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	*val = (u64) adreno_dev->uche_client_pf;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_gpu_client_pf_fops, _gpu_client_pf_get,
	_gpu_client_pf_set, "%llu\n");

typedef void (*reg_read_init_t)(struct kgsl_device *device);
typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
	unsigned int *vals, int linec);

static void sync_event_print(struct seq_file *s,
	struct kgsl_drawobj_sync_event *sync_event)
{
	switch (sync_event->type) {
	case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
		seq_printf(s, "sync: ctx: %u ts: %u",
			sync_event->context->id, sync_event->timestamp);
		break;
	}
	case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
		int i;
		struct event_fence_info *info = sync_event->priv;

		for (i = 0; info && i < info->num_fences; i++)
			seq_printf(s, "sync: %s",
				info->fences[i].name);
		break;
	}
	case KGSL_CMD_SYNCPOINT_TYPE_TIMELINE: {
		int j;
		struct event_timeline_info *info = sync_event->priv;

		for (j = 0; info && info[j].timeline; j++)
			seq_printf(s, "timeline: %d seqno: %lld",
				info[j].timeline, info[j].seqno);
		break;
	}
	default:
		seq_printf(s, "sync: type: %d", sync_event->type);
		break;
	}
}

struct flag_entry {
	unsigned long mask;
	const char *str;
};

static void _print_flags(struct seq_file *s, const struct flag_entry *table,
	unsigned long flags)
{
	int i;
	int first = 1;

	for (i = 0; table[i].str; i++) {
		if (flags & table[i].mask) {
			/* Separate flags with '|' after the first one */
			seq_printf(s, "%s%s", first ? "" : "|", table[i].str);
			flags &= ~(table[i].mask);
			first = 0;
		}
	}
	if (flags) {
		seq_printf(s, "%s0x%lx", first ? "" : "|", flags);
		first = 0;
	}
	if (first)
		seq_puts(s, "None");
}

#define print_flags(_s, _flag, _array...) \
	({ \
		const struct flag_entry symbols[] = \
			{ _array, { -1, NULL } }; \
		_print_flags(_s, symbols, _flag); \
	})
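
/*
 * Example of the output this produces: with the cmdobj table used below and
 * priv == BIT(CMDOBJ_SKIP) | BIT(CMDOBJ_WFI), the emitted string is
 * "skip|wait_for_idle"; set bits with no table entry are appended in hex,
 * and an empty mask prints "None".
 */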

static void syncobj_print(struct seq_file *s,
	struct kgsl_drawobj_sync *syncobj)
{
	struct kgsl_drawobj_sync_event *event;
	unsigned int i;

	seq_puts(s, " syncobj ");

	for (i = 0; i < syncobj->numsyncs; i++) {
		event = &syncobj->synclist[i];

		if (!kgsl_drawobj_event_pending(syncobj, i))
			continue;

		sync_event_print(s, event);
		seq_puts(s, "\n");
	}
}

static void cmdobj_print(struct seq_file *s,
	struct kgsl_drawobj_cmd *cmdobj)
{
	struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);

	if (drawobj->type == CMDOBJ_TYPE)
		seq_puts(s, " cmdobj ");
	else
		seq_puts(s, " markerobj ");

	seq_printf(s, "\t %u ", drawobj->timestamp);

	seq_puts(s, " priv: ");
	print_flags(s, cmdobj->priv,
		{ BIT(CMDOBJ_SKIP), "skip" },
		{ BIT(CMDOBJ_FORCE_PREAMBLE), "force_preamble" },
		{ BIT(CMDOBJ_WFI), "wait_for_idle" });
}

static void drawobj_print(struct seq_file *s,
	struct kgsl_drawobj *drawobj)
{
	if (!kref_get_unless_zero(&drawobj->refcount))
		return;

	if (drawobj->type == SYNCOBJ_TYPE)
		syncobj_print(s, SYNCOBJ(drawobj));
	else if ((drawobj->type == CMDOBJ_TYPE) ||
			(drawobj->type == MARKEROBJ_TYPE))
		cmdobj_print(s, CMDOBJ(drawobj));

	seq_puts(s, " flags: ");
	print_flags(s, drawobj->flags, KGSL_DRAWOBJ_FLAGS);
	kgsl_drawobj_put(drawobj);
	seq_puts(s, "\n");
}

static int ctx_print(struct seq_file *s, void *unused)
{
	struct adreno_context *drawctxt = s->private;
	unsigned int i;
	struct kgsl_event *event;
	unsigned int queued = 0, consumed = 0, retired = 0;

	seq_printf(s, "id: %u type: %s priority: %d process: %s (%d) tid: %d\n",
		drawctxt->base.id,
		kgsl_context_type(drawctxt->type),
		drawctxt->base.priority,
		drawctxt->base.proc_priv->comm,
		pid_nr(drawctxt->base.proc_priv->pid),
		drawctxt->base.tid);

	seq_puts(s, "flags: ");
	print_flags(s, drawctxt->base.flags & ~(KGSL_CONTEXT_PRIORITY_MASK
		| KGSL_CONTEXT_TYPE_MASK), KGSL_CONTEXT_FLAGS);
	seq_puts(s, " priv: ");
	print_flags(s, drawctxt->base.priv,
		{ BIT(KGSL_CONTEXT_PRIV_SUBMITTED), "submitted" },
		{ BIT(KGSL_CONTEXT_PRIV_DETACHED), "detached" },
		{ BIT(KGSL_CONTEXT_PRIV_INVALID), "invalid" },
		{ BIT(KGSL_CONTEXT_PRIV_PAGEFAULT), "pagefault" },
		{ BIT(ADRENO_CONTEXT_FAULT), "fault" },
		{ BIT(ADRENO_CONTEXT_GPU_HANG), "gpu_hang" },
		{ BIT(ADRENO_CONTEXT_GPU_HANG_FT), "gpu_hang_ft" },
		{ BIT(ADRENO_CONTEXT_SKIP_EOF), "skip_end_of_frame" },
		{ BIT(ADRENO_CONTEXT_FORCE_PREAMBLE), "force_preamble" });
	seq_puts(s, "\n");

	seq_puts(s, "timestamps: ");
	kgsl_readtimestamp(drawctxt->base.device, &drawctxt->base,
		KGSL_TIMESTAMP_QUEUED, &queued);
	kgsl_readtimestamp(drawctxt->base.device, &drawctxt->base,
		KGSL_TIMESTAMP_CONSUMED, &consumed);
	kgsl_readtimestamp(drawctxt->base.device, &drawctxt->base,
		KGSL_TIMESTAMP_RETIRED, &retired);
	seq_printf(s, "queued: %u consumed: %u retired: %u global: %u\n",
		queued, consumed, retired,
		drawctxt->internal_timestamp);

	seq_puts(s, "drawqueue:\n");
	spin_lock(&drawctxt->lock);
	for (i = drawctxt->drawqueue_head;
		i != drawctxt->drawqueue_tail;
		i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE))
		drawobj_print(s, drawctxt->drawqueue[i]);
	spin_unlock(&drawctxt->lock);

	seq_puts(s, "events:\n");
	spin_lock(&drawctxt->base.events.lock);
	list_for_each_entry(event, &drawctxt->base.events.events, node)
		seq_printf(s, "\t%d: %pS created: %u\n", event->timestamp,
			event->func, event->created);
	spin_unlock(&drawctxt->base.events.lock);

	return 0;
}

static int ctx_open(struct inode *inode, struct file *file)
{
	int ret;
	struct adreno_context *ctx = inode->i_private;

	if (!_kgsl_context_get(&ctx->base))
		return -ENODEV;

	ret = single_open(file, ctx_print, &ctx->base);
	if (ret)
		kgsl_context_put(&ctx->base);
	return ret;
}

static int ctx_release(struct inode *inode, struct file *file)
{
	struct kgsl_context *context;

	context = ((struct seq_file *)file->private_data)->private;

	kgsl_context_put(context);

	return single_release(inode, file);
}

static const struct file_operations ctx_fops = {
	.open = ctx_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ctx_release,
};
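
/*
 * Lifecycle note: ctx_open() takes an extra reference on the context so the
 * seq_file can dereference it for as long as the file stays open, and
 * ctx_release() drops that reference. ctx_print() casts seq->private (which
 * is &ctx->base) back to struct adreno_context, which is only safe while
 * 'base' remains the first member of that structure.
 */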

void
adreno_context_debugfs_init(struct adreno_device *adreno_dev,
	struct adreno_context *ctx)
{
	unsigned char name[16];

	/*
	 * Get the context here to make sure it still exists for the life of
	 * the file
	 */
	_kgsl_context_get(&ctx->base);

	snprintf(name, sizeof(name), "%d", ctx->base.id);

	ctx->debug_root = debugfs_create_file(name, 0444,
		adreno_dev->ctx_d_debugfs, ctx, &ctx_fops);
}
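
/*
 * Usage sketch: each context shows up as a read-only file named after its
 * id under the per-device ctx directory (device directory name assumed):
 *
 *   cat /sys/kernel/debug/kgsl/kgsl-3d0/ctx/7
 */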

static int _bcl_sid0_set(void *data, u64 val)
{
	struct kgsl_device *device = data;
	const struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);

	if (ops && ops->bcl_sid_set)
		return ops->bcl_sid_set(device, 0, val);

	return 0;
}

static int _bcl_sid0_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	const struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);

	/* Default to 0 so reads don't return stack garbage without GMU ops */
	*val = 0;
	if (ops && ops->bcl_sid_get)
		*val = ops->bcl_sid_get(device, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_sid0_fops, _bcl_sid0_get, _bcl_sid0_set, "%llu\n");

static int _bcl_sid1_set(void *data, u64 val)
{
	struct kgsl_device *device = data;
	const struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);

	if (ops && ops->bcl_sid_set)
		return ops->bcl_sid_set(device, 1, val);

	return 0;
}

static int _bcl_sid1_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	const struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);

	*val = 0;
	if (ops && ops->bcl_sid_get)
		*val = ops->bcl_sid_get(device, 1);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_sid1_fops, _bcl_sid1_get, _bcl_sid1_set, "%llu\n");

static int _bcl_sid2_set(void *data, u64 val)
{
	struct kgsl_device *device = data;
	const struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);

	if (ops && ops->bcl_sid_set)
		return ops->bcl_sid_set(device, 2, val);

	return 0;
}

static int _bcl_sid2_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	const struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);

	*val = 0;
	if (ops && ops->bcl_sid_get)
		*val = ops->bcl_sid_get(device, 2);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_sid2_fops, _bcl_sid2_get, _bcl_sid2_set, "%llu\n");
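
/*
 * The sid0/sid1/sid2 pairs above differ only in the index passed to the GMU
 * ops. A minimal sketch of a table-driven alternative (hypothetical, not
 * what the driver does; the index would have to travel through the debugfs
 * private data alongside the device):
 *
 *   struct bcl_sid_priv { struct kgsl_device *device; u32 id; };
 *
 *   static int _bcl_sid_set(void *data, u64 val)
 *   {
 *           struct bcl_sid_priv *p = data;
 *           const struct gmu_dev_ops *ops = GMU_DEVICE_OPS(p->device);
 *
 *           return (ops && ops->bcl_sid_set) ?
 *                   ops->bcl_sid_set(p->device, p->id, val) : 0;
 *   }
 */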

static int _bcl_throttle_time_us_get(void *data, u64 *val)
{
	struct kgsl_device *device = data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_BCL))
		*val = 0;
	else
		*val = (u64) adreno_dev->bcl_throttle_time_us;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(_bcl_throttle_fops, _bcl_throttle_time_us_get, NULL, "%llu\n");

static int _skipsaverestore_store(void *data, u64 val)
{
	struct adreno_device *adreno_dev = data;

	if (adreno_dev->hwsched_enabled)
		return adreno_power_cycle_bool(adreno_dev,
			&adreno_dev->preempt.skipsaverestore, val);

	adreno_dev->preempt.skipsaverestore = val ? true : false;
	return 0;
}

static int _skipsaverestore_show(void *data, u64 *val)
{
	struct adreno_device *adreno_dev = data;

	*val = (u64) adreno_dev->preempt.skipsaverestore;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(skipsaverestore_fops, _skipsaverestore_show, _skipsaverestore_store,
	"%llu\n");

static int _usesgmem_store(void *data, u64 val)
{
	struct adreno_device *adreno_dev = data;

	if (adreno_dev->hwsched_enabled)
		return adreno_power_cycle_bool(adreno_dev,
			&adreno_dev->preempt.usesgmem, val);

	adreno_dev->preempt.usesgmem = val ? true : false;
	return 0;
}

static int _usesgmem_show(void *data, u64 *val)
{
	struct adreno_device *adreno_dev = data;

	*val = (u64) adreno_dev->preempt.usesgmem;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(usesgmem_fops, _usesgmem_show, _usesgmem_store, "%llu\n");

static int _preempt_level_store(void *data, u64 val)
{
	struct adreno_device *adreno_dev = data;

	if (adreno_dev->hwsched_enabled)
		return adreno_power_cycle_u32(adreno_dev,
			&adreno_dev->preempt.preempt_level,
			min_t(u64, val, 2));

	adreno_dev->preempt.preempt_level = min_t(u64, val, 2);
	return 0;
}

static int _preempt_level_show(void *data, u64 *val)
{
	struct adreno_device *adreno_dev = data;

	*val = (u64) adreno_dev->preempt.preempt_level;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(preempt_level_fops, _preempt_level_show, _preempt_level_store, "%llu\n");
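
/*
 * Usage sketch for the preemption knobs (paths assumed): preempt_level is
 * clamped to 0..2 on write, and all three settings are applied across a
 * power cycle when the hardware scheduler is enabled:
 *
 *   echo 2 > /sys/kernel/debug/kgsl/kgsl-3d0/preemption/preempt_level
 *   echo 1 > /sys/kernel/debug/kgsl/kgsl-3d0/preemption/usesgmem
 *   echo 0 > /sys/kernel/debug/kgsl/kgsl-3d0/preemption/skipsaverestore
 */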

static int _warmboot_show(void *data, u64 *val)
{
	struct adreno_device *adreno_dev = data;

	*val = (u64) adreno_dev->warmboot_enabled;
	return 0;
}

/*
 * When the warmboot feature is enabled from debugfs, the first slumber exit
 * will be a cold boot and all HFI messages will be recorded, so that a warm
 * boot can happen on each subsequent slumber exit. When the feature is
 * disabled from debugfs, every slumber exit will be a cold boot.
 */
static int _warmboot_store(void *data, u64 val)
{
	struct adreno_device *adreno_dev = data;

	if (adreno_dev->warmboot_enabled == val)
		return 0;

	return adreno_power_cycle_bool(adreno_dev, &adreno_dev->warmboot_enabled, val);
}

DEFINE_DEBUGFS_ATTRIBUTE(warmboot_fops, _warmboot_show, _warmboot_store, "%llu\n");
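
/*
 * Usage sketch (path assumed): toggling the value goes through
 * adreno_power_cycle_bool(), so the write itself forces a power cycle and
 * the next slumber exit after enabling is the recording cold boot described
 * above:
 *
 *   echo 1 > /sys/kernel/debug/kgsl/kgsl-3d0/warmboot
 */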

static int _ifpc_hyst_store(void *data, u64 val)
{
	struct adreno_device *adreno_dev = data;
	u32 hyst;

	if (!gmu_core_dev_ifpc_isenabled(KGSL_DEVICE(adreno_dev)))
		return -EINVAL;

	/* IFPC hysteresis timer is 16 bits */
	hyst = max_t(u32, (u32) (FIELD_GET(GENMASK(15, 0), val)),
		adreno_dev->ifpc_hyst_floor);

	if (hyst == adreno_dev->ifpc_hyst)
		return 0;

	return adreno_power_cycle_u32(adreno_dev,
		&adreno_dev->ifpc_hyst, hyst);
}

static int _ifpc_hyst_show(void *data, u64 *val)
{
	struct adreno_device *adreno_dev = data;

	*val = (u64) adreno_dev->ifpc_hyst;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(ifpc_hyst_fops, _ifpc_hyst_show, _ifpc_hyst_store, "%llu\n");
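
/*
 * Usage sketch (path assumed): the written value is truncated to the 16-bit
 * timer field and raised to ifpc_hyst_floor if needed; writes fail with
 * -EINVAL while IFPC is disabled:
 *
 *   echo 1500 > /sys/kernel/debug/kgsl/kgsl-3d0/ifpc_hyst
 */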

void adreno_debugfs_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct dentry *snapshot_dir;

	if (IS_ERR_OR_NULL(device->d_debugfs))
		return;

	debugfs_create_file("active_cnt", 0444, device->d_debugfs, device,
		&_active_count_fops);

	adreno_dev->ctx_d_debugfs = debugfs_create_dir("ctx",
		device->d_debugfs);

	/*
	 * Group everything that lives in the shared snapshot directory behind
	 * one lookup; with a NULL parent these files would land in the
	 * debugfs root
	 */
	snapshot_dir = debugfs_lookup("snapshot", kgsl_debugfs_dir);
	if (!IS_ERR_OR_NULL(snapshot_dir)) {
		debugfs_create_file("coop_reset", 0644, snapshot_dir, device,
			&_coop_reset_fops);
		debugfs_create_file("ctxt_record_size", 0644, snapshot_dir,
			device, &_ctxt_record_size_fops);
		debugfs_create_file("gpu_client_pf", 0644, snapshot_dir,
			device, &_gpu_client_pf_fops);
		debugfs_create_bool("dump_all_ibs", 0644, snapshot_dir,
			&device->dump_all_ibs);
	}

	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM)) {
		debugfs_create_file("lm_limit", 0644, device->d_debugfs,
			device, &_lm_limit_fops);
		debugfs_create_file("lm_threshold_count", 0444,
			device->d_debugfs, device, &_lm_threshold_fops);
	}

	if (adreno_is_a5xx(adreno_dev))
		debugfs_create_file("isdb", 0644, device->d_debugfs,
			device, &_isdb_fops);

	if (gmu_core_isenabled(device))
		debugfs_create_file("ifpc_hyst", 0644, device->d_debugfs,
			device, &ifpc_hyst_fops);

	if (ADRENO_FEATURE(adreno_dev, ADRENO_GMU_WARMBOOT))
		debugfs_create_file("warmboot", 0644, device->d_debugfs,
			device, &warmboot_fops);

	adreno_dev->bcl_debugfs_dir = debugfs_create_dir("bcl",
		device->d_debugfs);
	if (!IS_ERR_OR_NULL(adreno_dev->bcl_debugfs_dir)) {
		debugfs_create_file("sid0", 0644, adreno_dev->bcl_debugfs_dir,
			device, &_sid0_fops);
		debugfs_create_file("sid1", 0644, adreno_dev->bcl_debugfs_dir,
			device, &_sid1_fops);
		debugfs_create_file("sid2", 0644, adreno_dev->bcl_debugfs_dir,
			device, &_sid2_fops);
		debugfs_create_file("bcl_throttle_time_us", 0444,
			adreno_dev->bcl_debugfs_dir, device,
			&_bcl_throttle_fops);
	}

	adreno_dev->preemption_debugfs_dir = debugfs_create_dir("preemption",
		device->d_debugfs);
	if (!IS_ERR_OR_NULL(adreno_dev->preemption_debugfs_dir)) {
		debugfs_create_file("preempt_level", 0644,
			adreno_dev->preemption_debugfs_dir, device,
			&preempt_level_fops);
		debugfs_create_file("usesgmem", 0644,
			adreno_dev->preemption_debugfs_dir, device,
			&usesgmem_fops);
		debugfs_create_file("skipsaverestore", 0644,
			adreno_dev->preemption_debugfs_dir, device,
			&skipsaverestore_fops);
	}
}
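
/*
 * Resulting layout, sketched with an assumed device directory name; the
 * snapshot entries live in the shared kgsl snapshot directory looked up
 * above rather than under the device:
 *
 *   /sys/kernel/debug/kgsl/kgsl-3d0/
 *       active_cnt  isdb  ifpc_hyst  warmboot  lm_limit  lm_threshold_count
 *       ctx/<id>
 *       bcl/{sid0,sid1,sid2,bcl_throttle_time_us}
 *       preemption/{preempt_level,usesgmem,skipsaverestore}
 *   /sys/kernel/debug/kgsl/snapshot/
 *       coop_reset  ctxt_record_size  gpu_client_pf  dump_all_ibs
 */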