/* sde_rotator_debug.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "%s: " fmt, __func__
  6. #include <linux/types.h>
  7. #include <linux/kernel.h>
  8. #include <linux/slab.h>
  9. #include <linux/uaccess.h>
  10. #include <linux/debugfs.h>
  11. #include "sde_rotator_debug.h"
  12. #include "sde_rotator_base.h"
  13. #include "sde_rotator_core.h"
  14. #include "sde_rotator_dev.h"
  15. #include "sde_rotator_trace.h"
  16. #ifdef CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG
  17. #define SDE_EVTLOG_DEFAULT_ENABLE 1
  18. #else
  19. #define SDE_EVTLOG_DEFAULT_ENABLE 0
  20. #endif
  21. #define SDE_EVTLOG_DEFAULT_PANIC 1
  22. #define SDE_EVTLOG_DEFAULT_REGDUMP SDE_ROT_DBG_DUMP_IN_MEM
  23. #define SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
  24. #define SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
  25. /*
  26. * evtlog will print this number of entries when it is called through
  27. * sysfs node or panic. This prevents kernel log from evtlog message
  28. * flood.
  29. */
  30. #define SDE_ROT_EVTLOG_PRINT_ENTRY 256
  31. /*
  32. * evtlog keeps this number of entries in memory for debug purpose. This
  33. * number must be greater than print entry to prevent out of bound evtlog
  34. * entry array access.
  35. */
  36. #define SDE_ROT_EVTLOG_ENTRY (SDE_ROT_EVTLOG_PRINT_ENTRY * 4)
  37. #define SDE_ROT_EVTLOG_MAX_DATA 15
  38. #define SDE_ROT_EVTLOG_BUF_MAX 512
  39. #define SDE_ROT_EVTLOG_BUF_ALIGN 32
  40. #define SDE_ROT_DEBUG_BASE_MAX 10
  41. #define SDE_ROT_DEFAULT_BASE_REG_CNT 0x100
  42. #define GROUP_BYTES 4
  43. #define ROW_BYTES 16
  44. #define SDE_ROT_TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
  45. static DEFINE_SPINLOCK(sde_rot_xlock);
/*
 * tlog - EVTLOG entry structure
 * @counter - EVTLOG entry counter
 * @time - timestamp of EVTLOG entry, in microseconds (set via ktime_to_us)
 * @name - function name of EVTLOG entry (pointer to caller's __func__;
 *	never copied, so it must have static storage duration)
 * @line - line number of EVTLOG entry
 * @data - EVTLOG data contents
 * @data_cnt - number of valid entries in @data
 * @pid - pid of the thread that logged the entry
 *
 * NOTE(review): @counter is declared but not written anywhere in the
 * visible code — possibly vestigial; confirm before removing.
 */
struct tlog {
	u32 counter;
	s64 time;
	const char *name;
	int line;
	u32 data[SDE_ROT_EVTLOG_MAX_DATA];
	u32 data_cnt;
	int pid;
};
/*
 * sde_rot_dbg_evtlog - EVTLOG debug data structure
 * @logs - EVTLOG entries (ring buffer of SDE_ROT_EVTLOG_ENTRY slots)
 * @first - first entry index in the EVTLOG
 * @last - last entry index in the EVTLOG
 * @curr - current entry index in the EVTLOG
 * @evtlog - EVTLOG debugfs handle
 * @evtlog_enable - boolean indicates EVTLOG enable/disable
 * @panic_on_err - boolean indicates issue panic after EVTLOG dump
 * @enable_reg_dump - control in-log/memory dump for rotator registers
 * @enable_vbif_dbgbus_dump - control in-log/memory dump for VBIF debug bus
 * @enable_rot_dbgbus_dump - control in-log/memory dump for rotator debug bus
 * @evtlog_dump_work - schedule work structure for timeout handler
 * @work_dump_reg - storage for register dump control in schedule work
 * @work_panic - storage for panic control in schedule work
 * @work_vbif_dbgbus - storage for VBIF debug bus control in schedule work
 * @work_rot_dbgbus - storage for rotator debug bus control in schedule work
 * @nrt_vbif_dbgbus_dump - memory buffer for VBIF debug bus dumping
 * @rot_dbgbus_dump - memory buffer for rotator debug bus dumping
 * @reg_dump_array - memory buffer for rotator registers dumping
 *
 * This declaration also defines the single file-scope instance of the
 * same name; all evtlog state in this file lives in that singleton,
 * serialized by sde_rot_xlock where noted.
 */
struct sde_rot_dbg_evtlog {
	struct tlog logs[SDE_ROT_EVTLOG_ENTRY];
	u32 first;
	u32 last;
	u32 curr;
	struct dentry *evtlog;
	u32 evtlog_enable;
	u32 panic_on_err;
	u32 enable_reg_dump;
	u32 enable_vbif_dbgbus_dump;
	u32 enable_rot_dbgbus_dump;
	struct work_struct evtlog_dump_work;
	bool work_dump_reg;
	bool work_panic;
	bool work_vbif_dbgbus;
	bool work_rot_dbgbus;
	u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */
	u32 *rot_dbgbus_dump;
	u32 *reg_dump_array[SDE_ROT_DEBUG_BASE_MAX];
} sde_rot_dbg_evtlog;
/*
 * sde_rot_dump_debug_bus - dump all rotator debug bus test points
 * @bus_dump_flag: dump flag controlling in-log/memory dump option
 * @dump_mem: in/out pointer to the memory dump buffer; allocated here
 *	(dma_alloc_coherent) on first use and reused afterwards
 *
 * For each entry in mdata->rot_dbg_bus: select the test point, read back
 * the result, optionally log and/or record {wr_addr, block, test, value}
 * as four u32 words per entry, then disable the bus again.
 */
static void sde_rot_dump_debug_bus(u32 bus_dump_flag, u32 **dump_mem)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	bool in_log, in_mem;
	u32 *dump_addr = NULL;
	u32 status = 0;
	struct sde_rot_debug_bus *head;
	phys_addr_t phys = 0;
	int i;
	u32 offset;
	void __iomem *base;

	in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
	in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
	base = mdata->sde_io.base;

	/* nothing to do without a mapped base or a debug bus description */
	if (!base || !mdata->rot_dbg_bus || !mdata->rot_dbg_bus_size)
		return;

	pr_info("======== SDE Rotator Debug bus DUMP =========\n");

	if (in_mem) {
		/* lazily allocate; buffer persists across dumps */
		if (!(*dump_mem))
			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
				mdata->rot_dbg_bus_size * 4 * sizeof(u32),
				&phys, GFP_KERNEL);
		if (*dump_mem) {
			dump_addr = *dump_mem;
			/*
			 * NOTE(review): "+ size * 16" is pointer arithmetic
			 * on a u32 *, but only size * 4 u32 words were
			 * allocated — the printed end address looks 4x past
			 * the real end. Log-only issue; confirm intent.
			 */
			pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
				__func__, dump_addr,
				dump_addr + (u32)mdata->rot_dbg_bus_size * 16);
		} else {
			/* fall back to log-only dump */
			in_mem = false;
			pr_err("dump_mem: allocation fails\n");
		}
	}

	/* keep clocks/SMMU up for the register accesses below */
	sde_smmu_ctrl(1);

	for (i = 0; i < mdata->rot_dbg_bus_size; i++) {
		head = mdata->rot_dbg_bus + i;
		/* select block/test point on the debug bus */
		writel_relaxed(SDE_ROT_TEST_MASK(head->block_id, head->test_id),
				base + head->wr_addr);
		wmb(); /* make sure test bits were written */

		/* result register sits right after the select register */
		offset = head->wr_addr + 0x4;

		status = readl_relaxed(base + offset);

		if (in_log)
			pr_err("waddr=0x%x blk=%d tst=%d val=0x%x\n",
				head->wr_addr, head->block_id, head->test_id,
				status);

		if (dump_addr && in_mem) {
			dump_addr[i*4] = head->wr_addr;
			dump_addr[i*4 + 1] = head->block_id;
			dump_addr[i*4 + 2] = head->test_id;
			dump_addr[i*4 + 3] = status;
		}

		/* Disable debug bus once we are done */
		writel_relaxed(0, base + head->wr_addr);
	}
	sde_smmu_ctrl(0);

	pr_info("========End Debug bus=========\n");
}
  162. /*
  163. * sde_rot_evtlog_is_enabled - helper function for checking EVTLOG
  164. * enable/disable
  165. * @flag - EVTLOG option flag
  166. */
  167. static inline bool sde_rot_evtlog_is_enabled(u32 flag)
  168. {
  169. return (flag & sde_rot_dbg_evtlog.evtlog_enable) ||
  170. (flag == SDE_ROT_EVTLOG_ALL &&
  171. sde_rot_dbg_evtlog.evtlog_enable);
  172. }
/*
 * __vbif_debug_bus - helper function for VBIF debug bus dump
 * @head - VBIF debug bus data structure
 * @vbif_base - VBIF IO mapped address
 * @dump_addr - output buffer for memory dump option (NULL to skip);
 *	advances by 4 u32 words per test point read
 * @in_log - boolean indicates in-log dump option
 *
 * Iterates every block x test-point combination for one bus entry,
 * recording {bus_addr, block, point, value} quadruples and/or logging.
 */
static void __vbif_debug_bus(struct sde_rot_vbif_debug_bus *head,
	void __iomem *vbif_base, u32 *dump_addr, bool in_log)
{
	int i, j;
	u32 val;

	/* nothing requested — skip the register walk entirely */
	if (!dump_addr && !in_log)
		return;

	for (i = 0; i < head->block_cnt; i++) {
		/* select block i on the bus */
		writel_relaxed(1 << (i + head->bit_offset),
				vbif_base + head->block_bus_addr);
		/* make sure that current bus block enable */
		wmb();
		for (j = 0; j < head->test_pnt_cnt; j++) {
			/* test-point select register follows the block select */
			writel_relaxed(j, vbif_base + head->block_bus_addr + 4);
			/* make sure that test point is enabled */
			wmb();
			val = readl_relaxed(vbif_base + MMSS_VBIF_TEST_BUS_OUT);
			if (dump_addr) {
				*dump_addr++ = head->block_bus_addr;
				*dump_addr++ = i;
				*dump_addr++ = j;
				*dump_addr++ = val;
			}
			if (in_log)
				pr_err("testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
					head->block_bus_addr, i, j, val);
		}
	}
}
/*
 * sde_rot_dump_vbif_debug_bus - VBIF debug bus dump
 * @bus_dump_flag - dump flag controlling in-log/memory dump option
 * @dump_mem - output buffer for memory dump location; allocated lazily
 *	via dma_alloc_coherent and reused on subsequent dumps
 */
static void sde_rot_dump_vbif_debug_bus(u32 bus_dump_flag,
	u32 **dump_mem)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	bool in_log, in_mem;
	u32 *dump_addr = NULL;
	u32 value;
	struct sde_rot_vbif_debug_bus *head;
	phys_addr_t phys = 0;
	int i, list_size = 0;
	void __iomem *vbif_base;
	struct sde_rot_vbif_debug_bus *dbg_bus;
	u32 bus_size;

	pr_info("======== NRT VBIF Debug bus DUMP =========\n");
	vbif_base = mdata->vbif_nrt_io.base;
	dbg_bus = mdata->nrt_vbif_dbg_bus;
	bus_size = mdata->nrt_vbif_dbg_bus_size;

	if (!vbif_base || !dbg_bus || !bus_size)
		return;

	/* allocate memory for each test point */
	for (i = 0; i < bus_size; i++) {
		head = dbg_bus + i;
		list_size += (head->block_cnt * head->test_pnt_cnt);
	}

	/* 4 bytes * 4 entries for each test point*/
	list_size *= 16;

	in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
	in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);

	if (in_mem) {
		if (!(*dump_mem))
			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
				list_size, &phys, GFP_KERNEL);

		if (*dump_mem) {
			dump_addr = *dump_mem;
			/*
			 * NOTE(review): list_size is a byte count, but it is
			 * added to a u32 * here, so the logged end address is
			 * 4x past the actual end. Log-only issue; confirm.
			 */
			pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
				__func__, dump_addr, dump_addr + list_size);
		} else {
			/* degrade to log-only dump */
			in_mem = false;
			pr_err("dump_mem: allocation fails\n");
		}
	}

	sde_smmu_ctrl(1);

	/* force the VBIF core clock on while probing the bus */
	value = readl_relaxed(vbif_base + MMSS_VBIF_CLKON);
	writel_relaxed(value | BIT(1), vbif_base + MMSS_VBIF_CLKON);

	/* make sure that vbif core is on */
	wmb();

	for (i = 0; i < bus_size; i++) {
		head = dbg_bus + i;

		writel_relaxed(0, vbif_base + head->disable_bus_addr);
		writel_relaxed(BIT(0), vbif_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
		/* make sure that other bus is off */
		wmb();

		__vbif_debug_bus(head, vbif_base, dump_addr, in_log);
		/* advance past the 4 u32 words written per test point */
		if (dump_addr)
			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
	}

	sde_smmu_ctrl(0);

	pr_info("========End VBIF Debug bus=========\n");
}
/*
 * sde_rot_dump_reg - helper function for dumping rotator register set content
 * @dump_name - register set name
 * @reg_dump_flag - dumping flag controlling in-log/memory dump location
 * @access - access type, sde registers or vbif registers
 * @addr - starting address offset for dumping
 * @len - range of the register set, in bytes (rounded up to 16)
 * @dump_mem - output buffer for memory dump location option; allocated
 *	lazily via dma_alloc_coherent and reused on later dumps
 */
void sde_rot_dump_reg(const char *dump_name, u32 reg_dump_flag,
	enum sde_rot_regdump_access access, u32 addr,
	int len, u32 **dump_mem)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	bool in_log, in_mem;
	u32 *dump_addr = NULL;
	phys_addr_t phys = 0;
	int i;
	void __iomem *base;

	in_log = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
	in_mem = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);

	pr_debug("reg_dump_flag=%d in_log=%d in_mem=%d\n",
		reg_dump_flag, in_log, in_mem);

	/* round byte length up to a whole 16-byte row, then count rows */
	if (len % 16)
		len += 16;
	len /= 16;

	if (in_mem) {
		if (!(*dump_mem))
			*dump_mem = dma_alloc_coherent(&mdata->pdev->dev,
				len * 16, &phys, GFP_KERNEL);

		if (*dump_mem) {
			dump_addr = *dump_mem;
			/*
			 * NOTE(review): "len * 16" is added to a u32 *, so
			 * the logged end address is 4x past the real end of
			 * the len * 16 byte buffer. Log-only; confirm.
			 */
			pr_info("%s: start_addr:0x%pK end_addr:0x%pK reg_addr=0x%X\n",
				dump_name, dump_addr, dump_addr + (u32)len * 16,
				addr);
		} else {
			in_mem = false;
			/* message says kzalloc but dma_alloc_coherent failed */
			pr_err("dump_mem: kzalloc fails!\n");
		}
	}

	base = mdata->sde_io.base;
	/*
	 * VBIF NRT base handling
	 */
	if (access == SDE_ROT_REGDUMP_VBIF)
		base = mdata->vbif_nrt_io.base;

	/* dump one 16-byte row (4 registers) per iteration */
	for (i = 0; i < len; i++) {
		u32 x0, x4, x8, xc;

		x0 = readl_relaxed(base + addr+0x0);
		x4 = readl_relaxed(base + addr+0x4);
		x8 = readl_relaxed(base + addr+0x8);
		xc = readl_relaxed(base + addr+0xc);

		if (in_log)
			pr_info("0x%08X : %08x %08x %08x %08x\n",
				addr, x0, x4, x8, xc);

		if (dump_addr && in_mem) {
			dump_addr[i*4] = x0;
			dump_addr[i*4 + 1] = x4;
			dump_addr[i*4 + 2] = x8;
			dump_addr[i*4 + 3] = xc;
		}

		addr += 16;
	}
}
/*
 * sde_rot_dump_reg_all - dumping all SDE rotator registers
 *
 * Walks mdata->regdump: SDE_ROT_REGDUMP_WRITE entries perform a single
 * register write (e.g. to enable a block before dumping); all other
 * entries are dumped via sde_rot_dump_reg into per-entry buffers.
 */
static void sde_rot_dump_reg_all(void)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_rot_regdump *head, *regdump;
	u32 regdump_size;
	int i;

	regdump = mdata->regdump;
	regdump_size = mdata->regdump_size;

	if (!regdump || !regdump_size)
		return;

	/* Enable clock to rotator if not yet enabled */
	sde_smmu_ctrl(1);

	/* cap at SDE_ROT_DEBUG_BASE_MAX — reg_dump_array has that many slots */
	for (i = 0; (i < regdump_size) && (i < SDE_ROT_DEBUG_BASE_MAX); i++) {
		head = &regdump[i];

		if (head->access == SDE_ROT_REGDUMP_WRITE) {
			/* a write entry must describe exactly one register */
			if (head->len != 1) {
				SDEROT_ERR("invalid write len %u\n", head->len);
				continue;
			}
			writel_relaxed(head->value,
					mdata->sde_io.base + head->offset);
			/* Make sure write go through */
			wmb();
		} else {
			sde_rot_dump_reg(head->name,
					sde_rot_dbg_evtlog.enable_reg_dump,
					head->access,
					head->offset, head->len,
					&sde_rot_dbg_evtlog.reg_dump_array[i]);
		}
	}

	/* Disable rotator clock */
	sde_smmu_ctrl(0);
}
/*
 * __sde_rot_evtlog_dump_calc_range - calculate dump range for EVTLOG
 *
 * Advances a static cursor (@next) one entry at a time through the
 * ring between first and last, so repeated calls walk the pending
 * entries. Caps the span at SDE_ROT_EVTLOG_PRINT_ENTRY to keep a dump
 * from flooding the log.
 *
 * Return: true if there is an entry to dump (evtlog->first points at
 * it), false when the ring has been fully consumed.
 */
static bool __sde_rot_evtlog_dump_calc_range(void)
{
	static u32 next;	/* persists across calls: resume position */
	bool need_dump = true;
	unsigned long flags;
	struct sde_rot_dbg_evtlog *evtlog = &sde_rot_dbg_evtlog;

	spin_lock_irqsave(&sde_rot_xlock, flags);

	evtlog->first = next;

	/* cursor caught up with the writer — nothing left to dump */
	if (evtlog->last == evtlog->first) {
		need_dump = false;
		goto dump_exit;
	}

	/* normalize wrapped indices so last >= first for the math below */
	if (evtlog->last < evtlog->first) {
		evtlog->first %= SDE_ROT_EVTLOG_ENTRY;
		if (evtlog->last < evtlog->first)
			evtlog->last += SDE_ROT_EVTLOG_ENTRY;
	}

	/* too far behind: skip ahead and report the overflow */
	if ((evtlog->last - evtlog->first) > SDE_ROT_EVTLOG_PRINT_ENTRY) {
		pr_warn("evtlog buffer overflow before dump: %d\n",
			evtlog->last - evtlog->first);
		evtlog->first = evtlog->last - SDE_ROT_EVTLOG_PRINT_ENTRY;
	}
	next = evtlog->first + 1;

dump_exit:
	spin_unlock_irqrestore(&sde_rot_xlock, flags);

	return need_dump;
}
  404. /*
  405. * sde_rot_evtlog_dump_entry - helper function for EVTLOG content dumping
  406. * @evtlog_buf: EVTLOG dump output buffer
  407. * @evtlog_buf_size: EVTLOG output buffer size
  408. */
  409. static ssize_t sde_rot_evtlog_dump_entry(char *evtlog_buf,
  410. ssize_t evtlog_buf_size)
  411. {
  412. int i;
  413. ssize_t off = 0;
  414. struct tlog *log, *prev_log;
  415. unsigned long flags;
  416. spin_lock_irqsave(&sde_rot_xlock, flags);
  417. log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.first %
  418. SDE_ROT_EVTLOG_ENTRY];
  419. prev_log = &sde_rot_dbg_evtlog.logs[(sde_rot_dbg_evtlog.first - 1) %
  420. SDE_ROT_EVTLOG_ENTRY];
  421. off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
  422. log->name, log->line);
  423. if (off < SDE_ROT_EVTLOG_BUF_ALIGN) {
  424. memset((evtlog_buf + off), 0x20,
  425. (SDE_ROT_EVTLOG_BUF_ALIGN - off));
  426. off = SDE_ROT_EVTLOG_BUF_ALIGN;
  427. }
  428. off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
  429. "=>[%-8d:%-11llu:%9llu][%-4d]:", sde_rot_dbg_evtlog.first,
  430. log->time, (log->time - prev_log->time), log->pid);
  431. for (i = 0; i < log->data_cnt; i++)
  432. off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
  433. "%x ", log->data[i]);
  434. off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
  435. spin_unlock_irqrestore(&sde_rot_xlock, flags);
  436. return off;
  437. }
  438. /*
  439. * sde_rot_evtlog_dump_all - Dumping all content in EVTLOG buffer
  440. */
  441. static void sde_rot_evtlog_dump_all(void)
  442. {
  443. char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX];
  444. while (__sde_rot_evtlog_dump_calc_range()) {
  445. sde_rot_evtlog_dump_entry(evtlog_buf, SDE_ROT_EVTLOG_BUF_MAX);
  446. pr_info("%s\n", evtlog_buf);
  447. }
  448. }
  449. /*
  450. * sde_rot_evtlog_dump_open - debugfs open handler for evtlog dump
  451. * @inode: debugfs inode
  452. * @file: file handler
  453. */
  454. static int sde_rot_evtlog_dump_open(struct inode *inode, struct file *file)
  455. {
  456. /* non-seekable */
  457. file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
  458. file->private_data = inode->i_private;
  459. return 0;
  460. }
  461. /*
  462. * sde_rot_evtlog_dump_read - debugfs read handler for evtlog dump
  463. * @file: file handler
  464. * @buff: user buffer content for debugfs
  465. * @count: size of user buffer
  466. * @ppos: position offset of user buffer
  467. */
  468. static ssize_t sde_rot_evtlog_dump_read(struct file *file, char __user *buff,
  469. size_t count, loff_t *ppos)
  470. {
  471. ssize_t len = 0;
  472. char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX];
  473. if (__sde_rot_evtlog_dump_calc_range()) {
  474. len = sde_rot_evtlog_dump_entry(evtlog_buf,
  475. SDE_ROT_EVTLOG_BUF_MAX);
  476. if (len < 0 || len > count) {
  477. pr_err("len is more than the user buffer size\n");
  478. return 0;
  479. }
  480. if (copy_to_user(buff, evtlog_buf, len))
  481. return -EFAULT;
  482. *ppos += len;
  483. }
  484. return len;
  485. }
/*
 * sde_rot_evtlog_dump_write - debugfs write handler for evtlog dump
 * @file: file handler
 * @user_buf: user buffer content from debugfs
 * @count: size of user buffer
 * @ppos: position offset of user buffer
 *
 * Any write triggers a full evtlog + register dump; the written data
 * itself is ignored. Panics afterwards when panic_on_err is set.
 */
static ssize_t sde_rot_evtlog_dump_write(struct file *file,
	const char __user *user_buf, size_t count, loff_t *ppos)
{
	sde_rot_evtlog_dump_all();

	sde_rot_dump_reg_all();

	if (sde_rot_dbg_evtlog.panic_on_err)
		panic("evtlog_dump_write");

	return count;
}
/*
 * sde_rot_evtlog_dump_helper - helper function for evtlog dump
 * @dead: boolean indicates panic after dump
 * @panic_name: Panic signature name show up in log
 * @dump_rot: boolean indicates rotator register dump
 * @dump_vbif_debug_bus: boolean indicates VBIF debug bus dump
 * @dump_rot_dbgbus: boolean indicates rotator debug bus dump
 *
 * Dump order is deliberate: evtlog, then debug buses, then registers
 * last, and only then an optional panic.
 */
static void sde_rot_evtlog_dump_helper(bool dead, const char *panic_name,
	bool dump_rot, bool dump_vbif_debug_bus, bool dump_rot_debug_bus)
{
	sde_rot_evtlog_dump_all();

	if (dump_rot_debug_bus)
		sde_rot_dump_debug_bus(
				sde_rot_dbg_evtlog.enable_rot_dbgbus_dump,
				&sde_rot_dbg_evtlog.rot_dbgbus_dump);

	if (dump_vbif_debug_bus)
		sde_rot_dump_vbif_debug_bus(
				sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump,
				&sde_rot_dbg_evtlog.nrt_vbif_dbgbus_dump);

	/*
	 * Rotator registers always dump last
	 */
	if (dump_rot)
		sde_rot_dump_reg_all();

	if (dead)
		panic(panic_name);
}
/*
 * sde_rot_evtlog_debug_work - schedule work function for evtlog dump
 * @work: schedule work structure
 *
 * Runs the dump with the options stashed in the work_* fields by
 * sde_rot_evtlog_tout_handler() when it queued this work.
 */
static void sde_rot_evtlog_debug_work(struct work_struct *work)
{
	sde_rot_evtlog_dump_helper(
		sde_rot_dbg_evtlog.work_panic,
		"evtlog_workitem",
		sde_rot_dbg_evtlog.work_dump_reg,
		sde_rot_dbg_evtlog.work_vbif_dbgbus,
		sde_rot_dbg_evtlog.work_rot_dbgbus);
}
  542. /*
  543. * sde_rot_evtlog_tout_handler - log dump timeout handler
  544. * @queue: boolean indicate putting log dump into queue
  545. * @name: function name having timeout
  546. */
  547. void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...)
  548. {
  549. int i;
  550. bool dead = false;
  551. bool dump_rot = false;
  552. bool dump_vbif_dbgbus = false;
  553. bool dump_rot_dbgbus = false;
  554. char *blk_name = NULL;
  555. va_list args;
  556. if (!sde_rot_evtlog_is_enabled(SDE_ROT_EVTLOG_DEFAULT))
  557. return;
  558. if (queue && work_pending(&sde_rot_dbg_evtlog.evtlog_dump_work))
  559. return;
  560. va_start(args, name);
  561. for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) {
  562. blk_name = va_arg(args, char*);
  563. if (IS_ERR_OR_NULL(blk_name))
  564. break;
  565. if (!strcmp(blk_name, "rot"))
  566. dump_rot = true;
  567. if (!strcmp(blk_name, "vbif_dbg_bus"))
  568. dump_vbif_dbgbus = true;
  569. if (!strcmp(blk_name, "rot_dbg_bus"))
  570. dump_rot_dbgbus = true;
  571. if (!strcmp(blk_name, "panic"))
  572. dead = true;
  573. }
  574. va_end(args);
  575. if (queue) {
  576. /* schedule work to dump later */
  577. sde_rot_dbg_evtlog.work_panic = dead;
  578. sde_rot_dbg_evtlog.work_dump_reg = dump_rot;
  579. sde_rot_dbg_evtlog.work_vbif_dbgbus = dump_vbif_dbgbus;
  580. sde_rot_dbg_evtlog.work_rot_dbgbus = dump_rot_dbgbus;
  581. schedule_work(&sde_rot_dbg_evtlog.evtlog_dump_work);
  582. } else {
  583. sde_rot_evtlog_dump_helper(dead, name, dump_rot,
  584. dump_vbif_dbgbus, dump_rot_dbgbus);
  585. }
  586. }
/*
 * sde_rot_evtlog - log contents into memory for dump analysis
 * @name: Name of function calling evtlog (must be a static string,
 *	only the pointer is stored)
 * @line: line number of calling function
 * @flag: Log control flag
 *
 * The variadic tail is a list of int values terminated by
 * SDE_ROT_DATA_LIMITER, up to SDE_ROT_EVTLOG_MAX_DATA entries.
 * Safe from any context: protected by an irqsave spinlock.
 */
void sde_rot_evtlog(const char *name, int line, int flag, ...)
{
	unsigned long flags;
	int i, val = 0;
	va_list args;
	struct tlog *log;

	if (!sde_rot_evtlog_is_enabled(flag))
		return;

	spin_lock_irqsave(&sde_rot_xlock, flags);

	/* claim the current ring slot and fill it in */
	log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.curr];
	log->time = ktime_to_us(ktime_get());
	log->name = name;
	log->line = line;
	log->data_cnt = 0;
	log->pid = current->pid;

	va_start(args, flag);
	for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) {
		val = va_arg(args, int);
		if (val == SDE_ROT_DATA_LIMITER)
			break;

		log->data[i] = val;
	}
	va_end(args);
	log->data_cnt = i;

	/* advance the write cursor; last counts total entries written */
	sde_rot_dbg_evtlog.curr =
		(sde_rot_dbg_evtlog.curr + 1) % SDE_ROT_EVTLOG_ENTRY;
	sde_rot_dbg_evtlog.last++;

	/* also emit the entry to ftrace (still under the spinlock) */
	trace_sde_rot_evtlog(name, line, log->data_cnt, log->data);

	spin_unlock_irqrestore(&sde_rot_xlock, flags);
}
/*
 * sde_rotator_stat_show - Show statistics on read to this debugfs file
 * @s: Pointer to sequence file structure
 * @data: Pointer to private data structure
 *
 * Prints per-request stage-to-stage latencies (in microseconds) for the
 * most recent SDE_ROTATOR_NUM_EVENTS requests, followed by summary
 * counters and min/max/avg processing and software-overhead times.
 */
static int sde_rotator_stat_show(struct seq_file *s, void *data)
{
	int i, offset;
	struct sde_rotator_device *rot_dev = s->private;
	struct sde_rotator_statistics *stats = &rot_dev->stats;
	u64 count = stats->count;
	int num_events;
	s64 proc_max, proc_min, proc_avg;
	s64 swoh_max, swoh_min, swoh_avg;

	proc_max = 0;
	proc_min = S64_MAX;
	proc_avg = 0;
	swoh_max = 0;
	swoh_min = S64_MAX;
	swoh_avg = 0;

	/* ts[] is a ring: when full, start from the oldest slot */
	if (count > SDE_ROTATOR_NUM_EVENTS) {
		num_events = SDE_ROTATOR_NUM_EVENTS;
		offset = count % SDE_ROTATOR_NUM_EVENTS;
	} else {
		num_events = count;
		offset = 0;
	}

	for (i = 0; i < num_events; i++) {
		int k = (offset + i) % SDE_ROTATOR_NUM_EVENTS;
		ktime_t *ts = stats->ts[k];
		/* request start = earlier of source/destination queue-buf */
		ktime_t start_time =
			ktime_before(ts[SDE_ROTATOR_TS_SRCQB],
					ts[SDE_ROTATOR_TS_DSTQB]) ?
					ts[SDE_ROTATOR_TS_SRCQB] :
					ts[SDE_ROTATOR_TS_DSTQB];
		s64 proc_time =
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
					start_time));
		s64 sw_overhead_time =
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
					start_time));

		seq_printf(s,
			"s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld st:%lld fl:%lld d:%lld sdq:%lld ddq:%lld t:%lld oht:%lld\n",
			i,
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
					ts[SDE_ROTATOR_TS_SRCQB])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
					ts[SDE_ROTATOR_TS_DSTQB])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_QUEUE],
					ts[SDE_ROTATOR_TS_FENCE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_COMMIT],
					ts[SDE_ROTATOR_TS_QUEUE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_START],
					ts[SDE_ROTATOR_TS_COMMIT])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
					ts[SDE_ROTATOR_TS_START])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
					ts[SDE_ROTATOR_TS_FLUSH])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
					ts[SDE_ROTATOR_TS_DONE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_SRCDQB],
					ts[SDE_ROTATOR_TS_RETIRE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DSTDQB],
					ts[SDE_ROTATOR_TS_RETIRE])),
			proc_time, sw_overhead_time);

		proc_max = max(proc_max, proc_time);
		proc_min = min(proc_min, proc_time);
		proc_avg += proc_time;

		swoh_max = max(swoh_max, sw_overhead_time);
		swoh_min = min(swoh_min, sw_overhead_time);
		swoh_avg += sw_overhead_time;
	}

	proc_avg = (num_events) ?
			DIV_ROUND_CLOSEST_ULL(proc_avg, num_events) : 0;
	swoh_avg = (num_events) ?
			DIV_ROUND_CLOSEST_ULL(swoh_avg, num_events) : 0;

	seq_printf(s, "count:%llu\n", count);
	/*
	 * NOTE(review): label reads "fai1" (digit one) — likely a typo for
	 * "fail", but any script parsing this node depends on the exact
	 * string, so it is left untouched here.
	 */
	seq_printf(s, "fai1:%llu\n", stats->fail_count);
	seq_printf(s, "t_max:%lld\n", proc_max);
	seq_printf(s, "t_min:%lld\n", proc_min);
	seq_printf(s, "t_avg:%lld\n", proc_avg);
	seq_printf(s, "swoh_max:%lld\n", swoh_max);
	seq_printf(s, "swoh_min:%lld\n", swoh_min);
	seq_printf(s, "swoh_avg:%lld\n", swoh_avg);

	return 0;
}
  709. /*
  710. * sde_rotator_raw_show - Show raw statistics on read from this debugfs file
  711. * @s: Pointer to sequence file structure
  712. * @data: Pointer to private data structure
  713. */
  714. static int sde_rotator_raw_show(struct seq_file *s, void *data)
  715. {
  716. int i, j, offset;
  717. struct sde_rotator_device *rot_dev = s->private;
  718. struct sde_rotator_statistics *stats = &rot_dev->stats;
  719. u64 count = stats->count;
  720. int num_events;
  721. if (count > SDE_ROTATOR_NUM_EVENTS) {
  722. num_events = SDE_ROTATOR_NUM_EVENTS;
  723. offset = count % SDE_ROTATOR_NUM_EVENTS;
  724. } else {
  725. num_events = count;
  726. offset = 0;
  727. }
  728. for (i = 0; i < num_events; i++) {
  729. int k = (offset + i) % SDE_ROTATOR_NUM_EVENTS;
  730. ktime_t *ts = stats->ts[k];
  731. seq_printf(s, "%d ", i);
  732. for (j = 0; j < SDE_ROTATOR_NUM_TIMESTAMPS; j++)
  733. seq_printf(s, "%lld ", ktime_to_us(ts[j]));
  734. seq_puts(s, "\n");
  735. }
  736. return 0;
  737. }
/*
 * sde_rotator_stat_open - Processed statistics debugfs file open function
 * @inode: debugfs inode
 * @file: file handler
 */
static int sde_rotator_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, sde_rotator_stat_show, inode->i_private);
}
/*
 * sde_rotator_raw_open - Raw statistics debugfs file open function
 * @inode: debugfs inode
 * @file: file handler
 */
static int sde_rotator_raw_open(struct inode *inode, struct file *file)
{
	return single_open(file, sde_rotator_raw_show, inode->i_private);
}
/*
 * sde_rotator_base_create_debugfs - Add debugfs nodes for rotator base data
 * @mdata: Pointer to rotator global data
 * @debugfs_root: Pointer to parent debugfs node
 *
 * Return: 0 on success, -EINVAL if any node creation fails.
 */
static int sde_rotator_base_create_debugfs(
		struct sde_rot_data_type *mdata,
		struct dentry *debugfs_root)
{
	if (!debugfs_create_u32("iommu_ref_cnt", 0444,
			debugfs_root, &mdata->iommu_ref_cnt)) {
		SDEROT_WARN("failed to create debugfs iommu ref cnt\n");
		return -EINVAL;
	}

	/* default to clock gating; node lets user force clocks always on */
	mdata->clk_always_on = false;
	if (!debugfs_create_bool("clk_always_on", 0644,
			debugfs_root, &mdata->clk_always_on)) {
		SDEROT_WARN("failed to create debugfs clk_always_on\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * sde_rotator_core_create_debugfs - Add debugfs nodes for rotator core tunables
 * @mgr: Pointer to rotator manager structure
 * @debugfs_root: Pointer to parent debugfs node
 *
 * Return: 0 on success, -EINVAL if node creation fails, or the error
 * code returned by the h/w specific debugfs hook.
 */
static int sde_rotator_core_create_debugfs(
		struct sde_rot_mgr *mgr,
		struct dentry *debugfs_root)
{
	int ret;

	if (!debugfs_create_u32("hwacquire_timeout", 0400,
			debugfs_root, &mgr->hwacquire_timeout)) {
		SDEROT_WARN("failed to create debugfs hw acquire timeout\n");
		return -EINVAL;
	}

	if (!debugfs_create_u32("ppc_numer", 0644,
			debugfs_root, &mgr->pixel_per_clk.numer)) {
		SDEROT_WARN("failed to create debugfs ppc numerator\n");
		return -EINVAL;
	}

	/* NOTE(review): 0600 here vs 0644 for ppc_numer — confirm intent */
	if (!debugfs_create_u32("ppc_denom", 0600,
			debugfs_root, &mgr->pixel_per_clk.denom)) {
		SDEROT_WARN("failed to create debugfs ppc denominator\n");
		return -EINVAL;
	}

	if (!debugfs_create_u64("enable_bw_vote", 0644,
			debugfs_root, &mgr->enable_bw_vote)) {
		SDEROT_WARN("failed to create enable_bw_vote\n");
		return -EINVAL;
	}

	/* let the h/w specific layer add its own nodes, if provided */
	if (mgr->ops_hw_create_debugfs) {
		ret = mgr->ops_hw_create_debugfs(mgr, debugfs_root);
		if (ret)
			return ret;
	}
	return 0;
}
/* file operations for the evtlog "dump" debugfs node */
static const struct file_operations sde_rot_evtlog_fops = {
	.open = sde_rot_evtlog_dump_open,
	.read = sde_rot_evtlog_dump_read,
	.write = sde_rot_evtlog_dump_write,
};
  820. static int sde_rotator_evtlog_create_debugfs(
  821. struct sde_rot_mgr *mgr,
  822. struct dentry *debugfs_root)
  823. {
  824. int i;
  825. sde_rot_dbg_evtlog.evtlog = debugfs_create_dir("evtlog", debugfs_root);
  826. if (IS_ERR_OR_NULL(sde_rot_dbg_evtlog.evtlog)) {
  827. pr_err("debugfs_create_dir fail, error %ld\n",
  828. PTR_ERR(sde_rot_dbg_evtlog.evtlog));
  829. sde_rot_dbg_evtlog.evtlog = NULL;
  830. return -ENODEV;
  831. }
  832. INIT_WORK(&sde_rot_dbg_evtlog.evtlog_dump_work,
  833. sde_rot_evtlog_debug_work);
  834. sde_rot_dbg_evtlog.work_panic = false;
  835. for (i = 0; i < SDE_ROT_EVTLOG_ENTRY; i++)
  836. sde_rot_dbg_evtlog.logs[i].counter = i;
  837. debugfs_create_file("dump", 0644, sde_rot_dbg_evtlog.evtlog, NULL,
  838. &sde_rot_evtlog_fops);
  839. debugfs_create_u32("enable", 0644, sde_rot_dbg_evtlog.evtlog,
  840. &sde_rot_dbg_evtlog.evtlog_enable);
  841. debugfs_create_u32("panic", 0644, sde_rot_dbg_evtlog.evtlog,
  842. &sde_rot_dbg_evtlog.panic_on_err);
  843. debugfs_create_u32("reg_dump", 0644, sde_rot_dbg_evtlog.evtlog,
  844. &sde_rot_dbg_evtlog.enable_reg_dump);
  845. debugfs_create_u32("vbif_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
  846. &sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump);
  847. debugfs_create_u32("rot_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
  848. &sde_rot_dbg_evtlog.enable_rot_dbgbus_dump);
  849. sde_rot_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE;
  850. sde_rot_dbg_evtlog.panic_on_err = SDE_EVTLOG_DEFAULT_PANIC;
  851. sde_rot_dbg_evtlog.enable_reg_dump = SDE_EVTLOG_DEFAULT_REGDUMP;
  852. sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump =
  853. SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP;
  854. sde_rot_dbg_evtlog.enable_rot_dbgbus_dump =
  855. SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP;
  856. pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
  857. sde_rot_dbg_evtlog.evtlog_enable,
  858. sde_rot_dbg_evtlog.panic_on_err,
  859. sde_rot_dbg_evtlog.enable_reg_dump);
  860. return 0;
  861. }
  862. /*
  863. * struct sde_rotator_stat_ops - processed statistics file operations
  864. */
  865. static const struct file_operations sde_rotator_stat_ops = {
  866. .open = sde_rotator_stat_open,
  867. .read = seq_read,
  868. .llseek = seq_lseek,
  869. .release = single_release
  870. };
  871. /*
  872. * struct sde_rotator_raw_ops - raw statistics file operations
  873. */
  874. static const struct file_operations sde_rotator_raw_ops = {
  875. .open = sde_rotator_raw_open,
  876. .read = seq_read,
  877. .llseek = seq_lseek,
  878. .release = single_release
  879. };
  880. static int sde_rotator_debug_base_open(struct inode *inode, struct file *file)
  881. {
  882. /* non-seekable */
  883. file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
  884. file->private_data = inode->i_private;
  885. return 0;
  886. }
  887. static int sde_rotator_debug_base_release(struct inode *inode,
  888. struct file *file)
  889. {
  890. struct sde_rotator_debug_base *dbg = file->private_data;
  891. if (dbg) {
  892. mutex_lock(&dbg->buflock);
  893. kfree(dbg->buf);
  894. dbg->buf_len = 0;
  895. dbg->buf = NULL;
  896. mutex_unlock(&dbg->buflock);
  897. }
  898. return 0;
  899. }
  900. static ssize_t sde_rotator_debug_base_offset_write(struct file *file,
  901. const char __user *user_buf, size_t count, loff_t *ppos)
  902. {
  903. struct sde_rotator_debug_base *dbg = file->private_data;
  904. u32 off = 0;
  905. u32 cnt = SDE_ROT_DEFAULT_BASE_REG_CNT;
  906. char buf[24];
  907. if (!dbg)
  908. return -ENODEV;
  909. if (count >= sizeof(buf))
  910. return -EFAULT;
  911. if (copy_from_user(buf, user_buf, count))
  912. return -EFAULT;
  913. buf[count] = 0;
  914. if (sscanf(buf, "%5x %x", &off, &cnt) < 2)
  915. return -EINVAL;
  916. if (off % sizeof(u32))
  917. return -EINVAL;
  918. if (off > dbg->max_offset)
  919. return -EINVAL;
  920. if (cnt > (dbg->max_offset - off))
  921. cnt = dbg->max_offset - off;
  922. mutex_lock(&dbg->buflock);
  923. dbg->off = off;
  924. dbg->cnt = cnt;
  925. mutex_unlock(&dbg->buflock);
  926. SDEROT_DBG("offset=%x cnt=%x\n", off, cnt);
  927. return count;
  928. }
/*
 * sde_rotator_debug_base_offset_read - show the current dump window
 * @file: Pointer to file; private_data carries the debug base
 * @buff: User buffer receiving "0x<offset> <count>\n"
 * @count: Size of the user buffer
 * @ppos: Read position; the whole value is emitted on the first read
 *
 * Return: number of bytes emitted, 0 at EOF, negative errno on failure.
 */
static ssize_t sde_rotator_debug_base_offset_read(struct file *file,
		char __user *buff, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	int len = 0;
	char buf[24] = {'\0'};

	if (!dbg)
		return -ENODEV;

	if (*ppos)
		return 0; /* the end */

	mutex_lock(&dbg->buflock);
	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
	mutex_unlock(&dbg->buflock);

	/* treat snprintf error or truncation as EOF */
	if (len < 0 || len >= sizeof(buf))
		return 0;

	/* NOTE(review): requires the user buffer to fit the whole line */
	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
		return -EFAULT;

	*ppos += len; /* increase offset */

	return len;
}
/*
 * sde_rotator_debug_base_reg_write - write one 32-bit register
 * @file: Pointer to file; private_data carries the debug base
 * @user_buf: User input of the form "<offset> <value>" in hex
 * @count: Length of user input
 * @ppos: Unused
 *
 * The offset must be word aligned and below max_offset. The write is
 * performed with the rotator manager locked and clocks enabled.
 *
 * Return: @count on success, negative errno on bad input, or 0 when
 * resource control is not enabled (NOTE(review): returning 0 from a
 * write handler can make userspace retry; confirm this is intended).
 */
static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	size_t off;
	u32 data, cnt;
	char buf[24];

	if (!dbg)
		return -ENODEV;

	if (count >= sizeof(buf))
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	buf[count] = 0;	/* terminate for sscanf */

	cnt = sscanf(buf, "%zx %x", &off, &data);

	if (cnt < 2)
		return -EFAULT;

	/* register writes must be word aligned and within range */
	if (off % sizeof(u32))
		return -EFAULT;

	if (off >= dbg->max_offset)
		return -EFAULT;

	mutex_lock(&dbg->buflock);

	/* Enable Clock for register access */
	sde_rot_mgr_lock(dbg->mgr);
	if (!sde_rotator_resource_ctrl_enabled(dbg->mgr)) {
		SDEROT_WARN("resource ctrl is not enabled\n");
		sde_rot_mgr_unlock(dbg->mgr);
		goto debug_write_error;
	}
	sde_rotator_clk_ctrl(dbg->mgr, true);

	writel_relaxed(data, dbg->base + off);

	/* Disable Clock after register access */
	sde_rotator_clk_ctrl(dbg->mgr, false);
	sde_rot_mgr_unlock(dbg->mgr);

	mutex_unlock(&dbg->buflock);

	SDEROT_DBG("addr=%zx data=%x\n", off, data);

	return count;

debug_write_error:
	mutex_unlock(&dbg->buflock);
	return 0;
}
/*
 * sde_rotator_debug_base_reg_read - dump the configured register window
 * @file: Pointer to file; private_data carries the debug base
 * @user_buf: User buffer receiving the hex dump
 * @count: Size of the user buffer
 * @ppos: Read position into the cached dump buffer
 *
 * On the first read, the [off, off+cnt) register window is snapshotted
 * into a kernel buffer (with manager locked and clocks enabled); later
 * reads serve from that cache until release frees it.
 *
 * Return: number of bytes copied, 0 at EOF, negative errno on failure.
 */
static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
		char __user *user_buf, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	size_t len;
	int rc = 0;

	if (!dbg) {
		SDEROT_ERR("invalid handle\n");
		return -ENODEV;
	}

	mutex_lock(&dbg->buflock);

	/* build the dump cache on first read only */
	if (!dbg->buf) {
		char dump_buf[64];
		char *ptr;
		int cnt, tot;

		/* one 64-byte text line per ROW_BYTES of registers */
		dbg->buf_len = sizeof(dump_buf) *
			DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
		dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);

		if (!dbg->buf) {
			SDEROT_ERR("not enough memory to hold reg dump\n");
			rc = -ENOMEM;
			goto debug_read_error;
		}

		if (dbg->off % sizeof(u32)) {
			rc = -EFAULT;
			goto debug_read_error;
		}

		ptr = dbg->base + dbg->off;
		tot = 0;

		/* Enable clock for register access */
		sde_rot_mgr_lock(dbg->mgr);
		if (!sde_rotator_resource_ctrl_enabled(dbg->mgr)) {
			SDEROT_WARN("resource ctrl is not enabled\n");
			sde_rot_mgr_unlock(dbg->mgr);
			/* NOTE(review): rc stays 0 here, read reports EOF */
			goto debug_read_error;
		}
		sde_rotator_clk_ctrl(dbg->mgr, true);

		for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
			hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
					   ROW_BYTES, GROUP_BYTES, dump_buf,
					   sizeof(dump_buf), false);
			/* prefix each row with its offset from base */
			len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
					"0x%08x: %s\n",
					((int) (unsigned long) ptr) -
					((int) (unsigned long) dbg->base),
					dump_buf);

			ptr += ROW_BYTES;
			tot += len;
			if (tot >= dbg->buf_len)
				break;
		}
		/* Disable clock after register access */
		sde_rotator_clk_ctrl(dbg->mgr, false);
		sde_rot_mgr_unlock(dbg->mgr);

		dbg->buf_len = tot;
	}

	if (*ppos >= dbg->buf_len) {
		rc = 0; /* done reading */
		goto debug_read_error;
	}

	len = min(count, dbg->buf_len - (size_t) *ppos);
	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
		SDEROT_ERR("failed to copy to user\n");
		rc = -EFAULT;
		goto debug_read_error;
	}

	*ppos += len; /* increase offset */

	mutex_unlock(&dbg->buflock);
	return len;

debug_read_error:
	mutex_unlock(&dbg->buflock);
	return rc;
}
  1063. static const struct file_operations sde_rotator_off_fops = {
  1064. .open = sde_rotator_debug_base_open,
  1065. .release = sde_rotator_debug_base_release,
  1066. .read = sde_rotator_debug_base_offset_read,
  1067. .write = sde_rotator_debug_base_offset_write,
  1068. };
  1069. static const struct file_operations sde_rotator_reg_fops = {
  1070. .open = sde_rotator_debug_base_open,
  1071. .release = sde_rotator_debug_base_release,
  1072. .read = sde_rotator_debug_base_reg_read,
  1073. .write = sde_rotator_debug_base_reg_write,
  1074. };
  1075. /*
  1076. * sde_rotator_create_debugfs - Setup rotator debugfs directory structure.
  1077. * @rot_dev: Pointer to rotator device
  1078. */
  1079. struct dentry *sde_rotator_create_debugfs(
  1080. struct sde_rotator_device *rot_dev)
  1081. {
  1082. struct dentry *debugfs_root;
  1083. char dirname[32] = {0};
  1084. snprintf(dirname, sizeof(dirname), "%s%d",
  1085. SDE_ROTATOR_DRV_NAME, rot_dev->dev->id);
  1086. debugfs_root = debugfs_create_dir(dirname, NULL);
  1087. if (!debugfs_root) {
  1088. SDEROT_ERR("fail create debugfs root\n");
  1089. return NULL;
  1090. }
  1091. if (!debugfs_create_file("stats", 0400,
  1092. debugfs_root, rot_dev, &sde_rotator_stat_ops)) {
  1093. SDEROT_ERR("fail create debugfs stats\n");
  1094. debugfs_remove_recursive(debugfs_root);
  1095. return NULL;
  1096. }
  1097. if (!debugfs_create_file("raw", 0400,
  1098. debugfs_root, rot_dev, &sde_rotator_raw_ops)) {
  1099. SDEROT_ERR("fail create debugfs raw\n");
  1100. debugfs_remove_recursive(debugfs_root);
  1101. return NULL;
  1102. }
  1103. if (!debugfs_create_u32("fence_timeout", 0400,
  1104. debugfs_root, &rot_dev->fence_timeout)) {
  1105. SDEROT_ERR("fail create fence_timeout\n");
  1106. debugfs_remove_recursive(debugfs_root);
  1107. return NULL;
  1108. }
  1109. if (!debugfs_create_u32("open_timeout", 0400,
  1110. debugfs_root, &rot_dev->open_timeout)) {
  1111. SDEROT_ERR("fail create open_timeout\n");
  1112. debugfs_remove_recursive(debugfs_root);
  1113. return NULL;
  1114. }
  1115. if (!debugfs_create_u32("disable_syscache", 0400,
  1116. debugfs_root, &rot_dev->disable_syscache)) {
  1117. SDEROT_ERR("fail create disable_syscache\n");
  1118. debugfs_remove_recursive(debugfs_root);
  1119. return NULL;
  1120. }
  1121. if (!debugfs_create_u32("streamoff_timeout", 0400,
  1122. debugfs_root, &rot_dev->streamoff_timeout)) {
  1123. SDEROT_ERR("fail create streamoff_timeout\n");
  1124. debugfs_remove_recursive(debugfs_root);
  1125. return NULL;
  1126. }
  1127. if (!debugfs_create_u32("early_submit", 0400,
  1128. debugfs_root, &rot_dev->early_submit)) {
  1129. SDEROT_ERR("fail create early_submit\n");
  1130. debugfs_remove_recursive(debugfs_root);
  1131. return NULL;
  1132. }
  1133. if (sde_rotator_base_create_debugfs(rot_dev->mdata, debugfs_root)) {
  1134. SDEROT_ERR("fail create base debugfs\n");
  1135. debugfs_remove_recursive(debugfs_root);
  1136. return NULL;
  1137. }
  1138. if (sde_rotator_core_create_debugfs(rot_dev->mgr, debugfs_root)) {
  1139. SDEROT_ERR("fail create core debugfs\n");
  1140. debugfs_remove_recursive(debugfs_root);
  1141. return NULL;
  1142. }
  1143. if (sde_rotator_evtlog_create_debugfs(rot_dev->mgr, debugfs_root)) {
  1144. SDEROT_ERR("fail create evtlog debugfs\n");
  1145. debugfs_remove_recursive(debugfs_root);
  1146. return NULL;
  1147. }
  1148. return debugfs_root;
  1149. }
/*
 * sde_rotator_destroy_debugfs - Destroy rotator debugfs directory structure.
 * @debugfs: Root dentry returned by sde_rotator_create_debugfs
 */
void sde_rotator_destroy_debugfs(struct dentry *debugfs)
{
	debugfs_remove_recursive(debugfs);
}