sde_rotator_debug.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "%s: " fmt, __func__
  6. #include <linux/types.h>
  7. #include <linux/kernel.h>
  8. #include <linux/slab.h>
  9. #include <linux/uaccess.h>
  10. #include <linux/debugfs.h>
  11. #include "sde_rotator_debug.h"
  12. #include "sde_rotator_base.h"
  13. #include "sde_rotator_core.h"
  14. #include "sde_rotator_dev.h"
  15. #include "sde_rotator_trace.h"
  16. #ifdef CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG
  17. #define SDE_EVTLOG_DEFAULT_ENABLE 1
  18. #else
  19. #define SDE_EVTLOG_DEFAULT_ENABLE 0
  20. #endif
  21. #define SDE_EVTLOG_DEFAULT_PANIC 1
  22. #define SDE_EVTLOG_DEFAULT_REGDUMP SDE_ROT_DBG_DUMP_IN_MEM
  23. #define SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
  24. #define SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
  25. /*
  26. * evtlog will print this number of entries when it is called through
  27. * sysfs node or panic. This prevents kernel log from evtlog message
  28. * flood.
  29. */
  30. #define SDE_ROT_EVTLOG_PRINT_ENTRY 256
  31. /*
  32. * evtlog keeps this number of entries in memory for debug purpose. This
  33. * number must be greater than print entry to prevent out of bound evtlog
  34. * entry array access.
  35. */
  36. #define SDE_ROT_EVTLOG_ENTRY (SDE_ROT_EVTLOG_PRINT_ENTRY * 4)
  37. #define SDE_ROT_EVTLOG_MAX_DATA 15
  38. #define SDE_ROT_EVTLOG_BUF_MAX 512
  39. #define SDE_ROT_EVTLOG_BUF_ALIGN 32
  40. #define SDE_ROT_DEBUG_BASE_MAX 10
  41. #define SDE_ROT_DEFAULT_BASE_REG_CNT 0x100
  42. #define GROUP_BYTES 4
  43. #define ROW_BYTES 16
  44. #define SDE_ROT_TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
  45. #if defined(CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG) && \
  46. defined(CONFIG_DEBUG_FS)
  47. static DEFINE_SPINLOCK(sde_rot_xlock);
  48. /*
  49. * tlog - EVTLOG entry structure
  50. * @counter - EVTLOG entriy counter
  51. * @time - timestamp of EVTLOG entry
  52. * @name - function name of EVTLOG entry
  53. * @line - line number of EVTLOG entry
  54. * @data - EVTLOG data contents
  55. * @data_cnt - number of data contents
  56. * @pid - pid of current calling thread
  57. */
  58. struct tlog {
  59. u32 counter;
  60. s64 time;
  61. const char *name;
  62. int line;
  63. u32 data[SDE_ROT_EVTLOG_MAX_DATA];
  64. u32 data_cnt;
  65. int pid;
  66. };
/*
 * struct sde_rot_dbg_evtlog - EVTLOG debug data structure
 * @logs: circular buffer of EVTLOG entries
 * @first: first (oldest) entry index for the current dump window
 * @last: last entry index in the EVTLOG (monotonic write counter)
 * @curr: current write position in @logs (wraps at SDE_ROT_EVTLOG_ENTRY)
 * @evtlog: EVTLOG debugfs directory handle
 * @evtlog_enable: bitmask of enabled EVTLOG flags (0 disables logging)
 * @panic_on_err: boolean indicates issue panic after EVTLOG dump
 * @enable_reg_dump: control in-log/memory dump for rotator registers
 * @enable_vbif_dbgbus_dump: control in-log/memory dump for VBIF debug bus
 * @enable_rot_dbgbus_dump: control in-log/memory dump for rotator debug bus
 * @evtlog_dump_work: schedule work structure for timeout handler
 * @work_dump_reg: storage for register dump control in schedule work
 * @work_panic: storage for panic control in schedule work
 * @work_vbif_dbgbus: storage for VBIF debug bus control in schedule work
 * @work_rot_dbgbus: storage for rotator debug bus control in schedule work
 * @nrt_vbif_dbgbus_dump: memory buffer for VBIF debug bus dumping
 * @rot_dbgbus_dump: memory buffer for rotator debug bus dumping
 * @reg_dump_array: memory buffers for rotator register range dumps
 *
 * Note: this declares both the struct tag and a file-scope singleton
 * instance of the same name, protected by sde_rot_xlock.
 */
struct sde_rot_dbg_evtlog {
	struct tlog logs[SDE_ROT_EVTLOG_ENTRY];
	u32 first;
	u32 last;
	u32 curr;
	struct dentry *evtlog;
	u32 evtlog_enable;
	u32 panic_on_err;
	u32 enable_reg_dump;
	u32 enable_vbif_dbgbus_dump;
	u32 enable_rot_dbgbus_dump;
	struct work_struct evtlog_dump_work;
	bool work_dump_reg;
	bool work_panic;
	bool work_vbif_dbgbus;
	bool work_rot_dbgbus;
	u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */
	u32 *rot_dbgbus_dump;
	u32 *reg_dump_array[SDE_ROT_DEBUG_BASE_MAX];
} sde_rot_dbg_evtlog;
  108. static void sde_rot_dump_debug_bus(u32 bus_dump_flag, u32 **dump_mem)
  109. {
  110. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  111. bool in_log, in_mem;
  112. u32 *dump_addr = NULL;
  113. u32 status = 0;
  114. struct sde_rot_debug_bus *head;
  115. int i;
  116. u32 offset;
  117. void __iomem *base;
  118. in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
  119. in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
  120. base = mdata->sde_io.base;
  121. if (!base || !mdata->rot_dbg_bus || !mdata->rot_dbg_bus_size)
  122. return;
  123. pr_info("======== SDE Rotator Debug bus DUMP =========\n");
  124. if (in_mem) {
  125. if (!(*dump_mem))
  126. *dump_mem = devm_kzalloc(&mdata->pdev->dev,
  127. mdata->rot_dbg_bus_size * 4 * sizeof(u32),
  128. GFP_KERNEL);
  129. if (*dump_mem) {
  130. dump_addr = *dump_mem;
  131. pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
  132. __func__, dump_addr,
  133. dump_addr + (u32)mdata->rot_dbg_bus_size * 16);
  134. } else {
  135. in_mem = false;
  136. pr_err("dump_mem: allocation fails\n");
  137. }
  138. }
  139. sde_smmu_ctrl(1);
  140. for (i = 0; i < mdata->rot_dbg_bus_size; i++) {
  141. head = mdata->rot_dbg_bus + i;
  142. writel_relaxed(SDE_ROT_TEST_MASK(head->block_id, head->test_id),
  143. base + head->wr_addr);
  144. wmb(); /* make sure test bits were written */
  145. offset = head->wr_addr + 0x4;
  146. status = readl_relaxed(base + offset);
  147. if (in_log)
  148. pr_err("waddr=0x%x blk=%d tst=%d val=0x%x\n",
  149. head->wr_addr, head->block_id, head->test_id,
  150. status);
  151. if (dump_addr && in_mem) {
  152. dump_addr[i*4] = head->wr_addr;
  153. dump_addr[i*4 + 1] = head->block_id;
  154. dump_addr[i*4 + 2] = head->test_id;
  155. dump_addr[i*4 + 3] = status;
  156. }
  157. /* Disable debug bus once we are done */
  158. writel_relaxed(0, base + head->wr_addr);
  159. }
  160. sde_smmu_ctrl(0);
  161. pr_info("========End Debug bus=========\n");
  162. }
  163. /*
  164. * sde_rot_evtlog_is_enabled - helper function for checking EVTLOG
  165. * enable/disable
  166. * @flag - EVTLOG option flag
  167. */
  168. static inline bool sde_rot_evtlog_is_enabled(u32 flag)
  169. {
  170. return (flag & sde_rot_dbg_evtlog.evtlog_enable) ||
  171. (flag == SDE_ROT_EVTLOG_ALL &&
  172. sde_rot_dbg_evtlog.evtlog_enable);
  173. }
/*
 * __vbif_debug_bus - helper function for VBIF debug bus dump
 * @head: VBIF debug bus data structure
 * @vbif_base: VBIF IO mapped address
 * @dump_addr: output buffer for memory dump option (NULL to skip)
 * @in_log: boolean indicates in-log dump option
 *
 * For every (block, test point) pair, selects the point on the bus,
 * reads MMSS_VBIF_TEST_BUS_OUT and emits a 4-word record
 * {block_bus_addr, block index, test point index, value} and/or a log
 * line.  The caller owns bus setup/teardown and buffer sizing.
 */
static void __vbif_debug_bus(struct sde_rot_vbif_debug_bus *head,
	void __iomem *vbif_base, u32 *dump_addr, bool in_log)
{
	int i, j;
	u32 val;

	/* nothing to do if neither output is requested */
	if (!dump_addr && !in_log)
		return;

	for (i = 0; i < head->block_cnt; i++) {
		/* select the i-th block on the debug bus */
		writel_relaxed(1 << (i + head->bit_offset),
				vbif_base + head->block_bus_addr);
		/* make sure that current bus block enable */
		wmb();
		for (j = 0; j < head->test_pnt_cnt; j++) {
			/* select test point j within the enabled block */
			writel_relaxed(j, vbif_base + head->block_bus_addr + 4);
			/* make sure that test point is enabled */
			wmb();
			val = readl_relaxed(vbif_base + MMSS_VBIF_TEST_BUS_OUT);
			if (dump_addr) {
				*dump_addr++ = head->block_bus_addr;
				*dump_addr++ = i;
				*dump_addr++ = j;
				*dump_addr++ = val;
			}
			if (in_log)
				pr_err("testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
					head->block_bus_addr, i, j, val);
		}
	}
}
  210. /*
  211. * sde_rot_dump_vbif_debug_bus - VBIF debug bus dump
  212. * @bus_dump_flag - dump flag controlling in-log/memory dump option
  213. * @dump_mem - output buffer for memory dump location
  214. */
  215. static void sde_rot_dump_vbif_debug_bus(u32 bus_dump_flag,
  216. u32 **dump_mem)
  217. {
  218. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  219. bool in_log, in_mem;
  220. u32 *dump_addr = NULL;
  221. u32 value;
  222. struct sde_rot_vbif_debug_bus *head;
  223. int i, list_size = 0;
  224. void __iomem *vbif_base;
  225. struct sde_rot_vbif_debug_bus *dbg_bus;
  226. u32 bus_size;
  227. pr_info("======== NRT VBIF Debug bus DUMP =========\n");
  228. vbif_base = mdata->vbif_nrt_io.base;
  229. dbg_bus = mdata->nrt_vbif_dbg_bus;
  230. bus_size = mdata->nrt_vbif_dbg_bus_size;
  231. if (!vbif_base || !dbg_bus || !bus_size)
  232. return;
  233. /* allocate memory for each test point */
  234. for (i = 0; i < bus_size; i++) {
  235. head = dbg_bus + i;
  236. list_size += (head->block_cnt * head->test_pnt_cnt);
  237. }
  238. /* 4 bytes * 4 entries for each test point*/
  239. list_size *= 16;
  240. in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
  241. in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
  242. if (in_mem) {
  243. if (!(*dump_mem))
  244. *dump_mem = devm_kzalloc(&mdata->pdev->dev, list_size,
  245. GFP_KERNEL);
  246. if (*dump_mem) {
  247. dump_addr = *dump_mem;
  248. pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
  249. __func__, dump_addr, dump_addr + list_size);
  250. } else {
  251. in_mem = false;
  252. pr_err("dump_mem: allocation fails\n");
  253. }
  254. }
  255. sde_smmu_ctrl(1);
  256. value = readl_relaxed(vbif_base + MMSS_VBIF_CLKON);
  257. writel_relaxed(value | BIT(1), vbif_base + MMSS_VBIF_CLKON);
  258. /* make sure that vbif core is on */
  259. wmb();
  260. for (i = 0; i < bus_size; i++) {
  261. head = dbg_bus + i;
  262. writel_relaxed(0, vbif_base + head->disable_bus_addr);
  263. writel_relaxed(BIT(0), vbif_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
  264. /* make sure that other bus is off */
  265. wmb();
  266. __vbif_debug_bus(head, vbif_base, dump_addr, in_log);
  267. if (dump_addr)
  268. dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
  269. }
  270. sde_smmu_ctrl(0);
  271. pr_info("========End VBIF Debug bus=========\n");
  272. }
  273. /*
  274. * sde_rot_dump_reg - helper function for dumping rotator register set content
  275. * @dump_name - register set name
  276. * @reg_dump_flag - dumping flag controlling in-log/memory dump location
  277. * @access - access type, sde registers or vbif registers
  278. * @addr - starting address offset for dumping
  279. * @len - range of the register set
  280. * @dump_mem - output buffer for memory dump location option
  281. */
  282. void sde_rot_dump_reg(const char *dump_name, u32 reg_dump_flag,
  283. enum sde_rot_regdump_access access, u32 addr,
  284. int len, u32 **dump_mem)
  285. {
  286. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  287. bool in_log, in_mem;
  288. u32 *dump_addr = NULL;
  289. int i;
  290. void __iomem *base;
  291. in_log = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
  292. in_mem = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
  293. pr_debug("reg_dump_flag=%d in_log=%d in_mem=%d\n",
  294. reg_dump_flag, in_log, in_mem);
  295. if (len % 16)
  296. len += 16;
  297. len /= 16;
  298. if (in_mem) {
  299. if (!(*dump_mem))
  300. *dump_mem = devm_kzalloc(&mdata->pdev->dev, len * 16,
  301. GFP_KERNEL);
  302. if (*dump_mem) {
  303. dump_addr = *dump_mem;
  304. pr_info("%s: start_addr:0x%pK end_addr:0x%pK reg_addr=0x%X\n",
  305. dump_name, dump_addr, dump_addr + (u32)len * 16,
  306. addr);
  307. } else {
  308. in_mem = false;
  309. pr_err("dump_mem: kzalloc fails!\n");
  310. }
  311. }
  312. base = mdata->sde_io.base;
  313. /*
  314. * VBIF NRT base handling
  315. */
  316. if (access == SDE_ROT_REGDUMP_VBIF)
  317. base = mdata->vbif_nrt_io.base;
  318. for (i = 0; i < len; i++) {
  319. u32 x0, x4, x8, xc;
  320. x0 = readl_relaxed(base + addr+0x0);
  321. x4 = readl_relaxed(base + addr+0x4);
  322. x8 = readl_relaxed(base + addr+0x8);
  323. xc = readl_relaxed(base + addr+0xc);
  324. if (in_log)
  325. pr_info("0x%08X : %08x %08x %08x %08x\n",
  326. addr, x0, x4, x8, xc);
  327. if (dump_addr && in_mem) {
  328. dump_addr[i*4] = x0;
  329. dump_addr[i*4 + 1] = x4;
  330. dump_addr[i*4 + 2] = x8;
  331. dump_addr[i*4 + 3] = xc;
  332. }
  333. addr += 16;
  334. }
  335. }
  336. /*
  337. * sde_rot_dump_reg_all - dumping all SDE rotator registers
  338. */
  339. static void sde_rot_dump_reg_all(void)
  340. {
  341. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  342. struct sde_rot_regdump *head, *regdump;
  343. u32 regdump_size;
  344. int i;
  345. regdump = mdata->regdump;
  346. regdump_size = mdata->regdump_size;
  347. if (!regdump || !regdump_size)
  348. return;
  349. /* Enable clock to rotator if not yet enabled */
  350. sde_smmu_ctrl(1);
  351. for (i = 0; (i < regdump_size) && (i < SDE_ROT_DEBUG_BASE_MAX); i++) {
  352. head = &regdump[i];
  353. if (head->access == SDE_ROT_REGDUMP_WRITE) {
  354. if (head->len != 1) {
  355. SDEROT_ERR("invalid write len %u\n", head->len);
  356. continue;
  357. }
  358. writel_relaxed(head->value,
  359. mdata->sde_io.base + head->offset);
  360. /* Make sure write go through */
  361. wmb();
  362. } else {
  363. sde_rot_dump_reg(head->name,
  364. sde_rot_dbg_evtlog.enable_reg_dump,
  365. head->access,
  366. head->offset, head->len,
  367. &sde_rot_dbg_evtlog.reg_dump_array[i]);
  368. }
  369. }
  370. /* Disable rotator clock */
  371. sde_smmu_ctrl(0);
  372. }
/*
 * __sde_rot_evtlog_dump_calc_range - calculate dump range for EVTLOG
 *
 * Advances a static cursor ('next') through the circular log one entry
 * per call, setting evtlog->first to the entry to print.  Returns true
 * while there is at least one unprinted entry, false when the cursor has
 * caught up with evtlog->last.  If the writer has overrun the reader by
 * more than SDE_ROT_EVTLOG_PRINT_ENTRY entries, the range is clamped and
 * the overflow is reported.
 */
static bool __sde_rot_evtlog_dump_calc_range(void)
{
	static u32 next;	/* persists across calls within one dump */
	bool need_dump = true;
	unsigned long flags;
	struct sde_rot_dbg_evtlog *evtlog = &sde_rot_dbg_evtlog;

	spin_lock_irqsave(&sde_rot_xlock, flags);

	evtlog->first = next;

	/* caught up with the writer: nothing left to dump */
	if (evtlog->last == evtlog->first) {
		need_dump = false;
		goto dump_exit;
	}

	/*
	 * Writer index wrapped behind the reader; normalize both indices
	 * so (last - first) is a valid positive distance.
	 */
	if (evtlog->last < evtlog->first) {
		evtlog->first %= SDE_ROT_EVTLOG_ENTRY;
		if (evtlog->last < evtlog->first)
			evtlog->last += SDE_ROT_EVTLOG_ENTRY;
	}

	/* clamp to the per-dump print budget to avoid log flooding */
	if ((evtlog->last - evtlog->first) > SDE_ROT_EVTLOG_PRINT_ENTRY) {
		pr_warn("evtlog buffer overflow before dump: %d\n",
			evtlog->last - evtlog->first);
		evtlog->first = evtlog->last - SDE_ROT_EVTLOG_PRINT_ENTRY;
	}
	next = evtlog->first + 1;

dump_exit:
	spin_unlock_irqrestore(&sde_rot_xlock, flags);

	return need_dump;
}
  403. /*
  404. * sde_rot_evtlog_dump_entry - helper function for EVTLOG content dumping
  405. * @evtlog_buf: EVTLOG dump output buffer
  406. * @evtlog_buf_size: EVTLOG output buffer size
  407. */
  408. static ssize_t sde_rot_evtlog_dump_entry(char *evtlog_buf,
  409. ssize_t evtlog_buf_size)
  410. {
  411. int i;
  412. ssize_t off = 0;
  413. struct tlog *log, *prev_log;
  414. unsigned long flags;
  415. spin_lock_irqsave(&sde_rot_xlock, flags);
  416. log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.first %
  417. SDE_ROT_EVTLOG_ENTRY];
  418. prev_log = &sde_rot_dbg_evtlog.logs[(sde_rot_dbg_evtlog.first - 1) %
  419. SDE_ROT_EVTLOG_ENTRY];
  420. off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
  421. log->name, log->line);
  422. if (off < SDE_ROT_EVTLOG_BUF_ALIGN) {
  423. memset((evtlog_buf + off), 0x20,
  424. (SDE_ROT_EVTLOG_BUF_ALIGN - off));
  425. off = SDE_ROT_EVTLOG_BUF_ALIGN;
  426. }
  427. off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
  428. "=>[%-8d:%-11llu:%9llu][%-4d]:", sde_rot_dbg_evtlog.first,
  429. log->time, (log->time - prev_log->time), log->pid);
  430. for (i = 0; i < log->data_cnt; i++)
  431. off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
  432. "%x ", log->data[i]);
  433. off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
  434. spin_unlock_irqrestore(&sde_rot_xlock, flags);
  435. return off;
  436. }
  437. /*
  438. * sde_rot_evtlog_dump_all - Dumping all content in EVTLOG buffer
  439. */
  440. static void sde_rot_evtlog_dump_all(void)
  441. {
  442. char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX];
  443. while (__sde_rot_evtlog_dump_calc_range()) {
  444. sde_rot_evtlog_dump_entry(evtlog_buf, SDE_ROT_EVTLOG_BUF_MAX);
  445. pr_info("%s\n", evtlog_buf);
  446. }
  447. }
  448. /*
  449. * sde_rot_evtlog_dump_open - debugfs open handler for evtlog dump
  450. * @inode: debugfs inode
  451. * @file: file handler
  452. */
  453. static int sde_rot_evtlog_dump_open(struct inode *inode, struct file *file)
  454. {
  455. /* non-seekable */
  456. file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
  457. file->private_data = inode->i_private;
  458. return 0;
  459. }
/*
 * sde_rot_evtlog_dump_read - debugfs read handler for evtlog dump
 * @file: file handler
 * @buff: user buffer content for debugfs
 * @count: size of user buffer
 * @ppos: position offset of user buffer
 *
 * Streams one formatted EVTLOG entry per read() call; returns 0 (EOF)
 * when no entries remain.
 */
static ssize_t sde_rot_evtlog_dump_read(struct file *file, char __user *buff,
		size_t count, loff_t *ppos)
{
	ssize_t len = 0;
	char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX];

	if (__sde_rot_evtlog_dump_calc_range()) {
		len = sde_rot_evtlog_dump_entry(evtlog_buf,
			SDE_ROT_EVTLOG_BUF_MAX);
		/*
		 * NOTE(review): a user buffer smaller than the entry (or a
		 * formatting error) is reported as EOF (return 0) and the
		 * entry is dropped, since the cursor already advanced --
		 * confirm this lossy behavior is intended.
		 */
		if (len < 0 || len > count) {
			pr_err("len is more than the user buffer size\n");
			return 0;
		}

		if (copy_to_user(buff, evtlog_buf, len))
			return -EFAULT;
		*ppos += len;
	}

	return len;
}
/*
 * sde_rot_evtlog_dump_helper - helper function for evtlog dump
 * @dead: boolean indicates panic after dump
 * @panic_name: panic signature name shown in the log
 * @dump_rot: boolean indicates rotator register dump
 * @dump_vbif_debug_bus: boolean indicates VBIF debug bus dump
 * @dump_rot_debug_bus: boolean indicates rotator debug bus dump
 *
 * Dump order is deliberate: evtlog first, then the debug buses, then the
 * rotator registers, and only then the optional panic.
 */
static void sde_rot_evtlog_dump_helper(bool dead, const char *panic_name,
	bool dump_rot, bool dump_vbif_debug_bus, bool dump_rot_debug_bus)
{
	sde_rot_evtlog_dump_all();

	if (dump_rot_debug_bus)
		sde_rot_dump_debug_bus(
				sde_rot_dbg_evtlog.enable_rot_dbgbus_dump,
				&sde_rot_dbg_evtlog.rot_dbgbus_dump);

	if (dump_vbif_debug_bus)
		sde_rot_dump_vbif_debug_bus(
				sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump,
				&sde_rot_dbg_evtlog.nrt_vbif_dbgbus_dump);

	/*
	 * Rotator registers always dump last
	 */
	if (dump_rot)
		sde_rot_dump_reg_all();

	if (dead)
		panic(panic_name);
}
/*
 * sde_rot_evtlog_debug_work - deferred-context evtlog dump
 * @work: schedule work structure
 *
 * Runs the dump with the options that sde_rot_evtlog_tout_handler()
 * stashed in the global work_* fields before scheduling this work item.
 */
static void sde_rot_evtlog_debug_work(struct work_struct *work)
{
	sde_rot_evtlog_dump_helper(
		sde_rot_dbg_evtlog.work_panic,
		"evtlog_workitem",
		sde_rot_dbg_evtlog.work_dump_reg,
		sde_rot_dbg_evtlog.work_vbif_dbgbus,
		sde_rot_dbg_evtlog.work_rot_dbgbus);
}
  525. /*
  526. * sde_rot_evtlog_tout_handler - log dump timeout handler
  527. * @queue: boolean indicate putting log dump into queue
  528. * @name: function name having timeout
  529. */
  530. void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...)
  531. {
  532. int i;
  533. bool dead = false;
  534. bool dump_rot = false;
  535. bool dump_vbif_dbgbus = false;
  536. bool dump_rot_dbgbus = false;
  537. char *blk_name = NULL;
  538. va_list args;
  539. if (!sde_rot_evtlog_is_enabled(SDE_ROT_EVTLOG_DEFAULT))
  540. return;
  541. if (queue && work_pending(&sde_rot_dbg_evtlog.evtlog_dump_work))
  542. return;
  543. va_start(args, name);
  544. for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) {
  545. blk_name = va_arg(args, char*);
  546. if (IS_ERR_OR_NULL(blk_name))
  547. break;
  548. if (!strcmp(blk_name, "rot"))
  549. dump_rot = true;
  550. if (!strcmp(blk_name, "vbif_dbg_bus"))
  551. dump_vbif_dbgbus = true;
  552. if (!strcmp(blk_name, "rot_dbg_bus"))
  553. dump_rot_dbgbus = true;
  554. if (!strcmp(blk_name, "panic"))
  555. dead = true;
  556. }
  557. va_end(args);
  558. if (queue) {
  559. /* schedule work to dump later */
  560. sde_rot_dbg_evtlog.work_panic = dead;
  561. sde_rot_dbg_evtlog.work_dump_reg = dump_rot;
  562. sde_rot_dbg_evtlog.work_vbif_dbgbus = dump_vbif_dbgbus;
  563. sde_rot_dbg_evtlog.work_rot_dbgbus = dump_rot_dbgbus;
  564. schedule_work(&sde_rot_dbg_evtlog.evtlog_dump_work);
  565. } else {
  566. sde_rot_evtlog_dump_helper(dead, name, dump_rot,
  567. dump_vbif_dbgbus, dump_rot_dbgbus);
  568. }
  569. }
/*
 * sde_rot_evtlog - log contents into memory for dump analysis
 * @name: Name of function calling evtlog
 * @line: line number of calling function
 * @flag: Log control flag
 * @...: up to SDE_ROT_EVTLOG_MAX_DATA int values, terminated by
 *       SDE_ROT_DATA_LIMITER
 *
 * Writes one entry into the circular log under sde_rot_xlock and also
 * emits it as a trace event.  No-op when @flag is not enabled.
 */
void sde_rot_evtlog(const char *name, int line, int flag, ...)
{
	unsigned long flags;
	int i, val = 0;
	va_list args;
	struct tlog *log;

	if (!sde_rot_evtlog_is_enabled(flag))
		return;

	spin_lock_irqsave(&sde_rot_xlock, flags);
	log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.curr];
	log->time = ktime_to_us(ktime_get());
	log->name = name;
	log->line = line;
	log->data_cnt = 0;
	log->pid = current->pid;

	/* copy variadic data words until the limiter sentinel */
	va_start(args, flag);
	for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) {
		val = va_arg(args, int);
		if (val == SDE_ROT_DATA_LIMITER)
			break;

		log->data[i] = val;
	}
	va_end(args);
	log->data_cnt = i;
	/* advance the circular write cursor; 'last' counts total writes */
	sde_rot_dbg_evtlog.curr =
		(sde_rot_dbg_evtlog.curr + 1) % SDE_ROT_EVTLOG_ENTRY;
	sde_rot_dbg_evtlog.last++;

	trace_sde_rot_evtlog(name, line, log->data_cnt, log->data);

	spin_unlock_irqrestore(&sde_rot_xlock, flags);
}
/*
 * sde_rotator_stat_show - Show statistics on read to this debugfs file
 * @s: Pointer to sequence file structure
 * @data: Pointer to private data structure
 *
 * Prints one line per recorded event with the per-stage latencies (in
 * microseconds) derived from the timestamp ring buffer, followed by
 * count/min/max/avg summary lines for total processing time and software
 * overhead time.
 */
static int sde_rotator_stat_show(struct seq_file *s, void *data)
{
	int i, offset;
	struct sde_rotator_device *rot_dev = s->private;
	struct sde_rotator_statistics *stats = &rot_dev->stats;
	u64 count = stats->count;
	int num_events;
	s64 proc_max, proc_min, proc_avg;
	s64 swoh_max, swoh_min, swoh_avg;

	proc_max = 0;
	proc_min = S64_MAX;
	proc_avg = 0;
	swoh_max = 0;
	swoh_min = S64_MAX;
	swoh_avg = 0;

	/*
	 * The timestamp store is a ring of SDE_ROTATOR_NUM_EVENTS slots;
	 * once it has wrapped, start from the oldest entry.
	 */
	if (count > SDE_ROTATOR_NUM_EVENTS) {
		num_events = SDE_ROTATOR_NUM_EVENTS;
		offset = count % SDE_ROTATOR_NUM_EVENTS;
	} else {
		num_events = count;
		offset = 0;
	}

	for (i = 0; i < num_events; i++) {
		int k = (offset + i) % SDE_ROTATOR_NUM_EVENTS;
		ktime_t *ts = stats->ts[k];
		/* processing starts at whichever queue-buffer came first */
		ktime_t start_time =
			ktime_before(ts[SDE_ROTATOR_TS_SRCQB],
					ts[SDE_ROTATOR_TS_DSTQB]) ?
					ts[SDE_ROTATOR_TS_SRCQB] :
					ts[SDE_ROTATOR_TS_DSTQB];
		/* total time from first queue-buffer to retire */
		s64 proc_time =
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
					start_time));
		/* software-only time from first queue-buffer to flush */
		s64 sw_overhead_time =
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
					start_time));

		seq_printf(s,
			"s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld st:%lld fl:%lld d:%lld sdq:%lld ddq:%lld t:%lld oht:%lld\n",
			i,
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
					ts[SDE_ROTATOR_TS_SRCQB])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
					ts[SDE_ROTATOR_TS_DSTQB])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_QUEUE],
					ts[SDE_ROTATOR_TS_FENCE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_COMMIT],
					ts[SDE_ROTATOR_TS_QUEUE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_START],
					ts[SDE_ROTATOR_TS_COMMIT])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
					ts[SDE_ROTATOR_TS_START])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
					ts[SDE_ROTATOR_TS_FLUSH])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
					ts[SDE_ROTATOR_TS_DONE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_SRCDQB],
					ts[SDE_ROTATOR_TS_RETIRE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DSTDQB],
					ts[SDE_ROTATOR_TS_RETIRE])),
			proc_time, sw_overhead_time);

		proc_max = max(proc_max, proc_time);
		proc_min = min(proc_min, proc_time);
		proc_avg += proc_time;

		swoh_max = max(swoh_max, sw_overhead_time);
		swoh_min = min(swoh_min, sw_overhead_time);
		swoh_avg += sw_overhead_time;
	}

	proc_avg = (num_events) ?
			DIV_ROUND_CLOSEST_ULL(proc_avg, num_events) : 0;
	swoh_avg = (num_events) ?
			DIV_ROUND_CLOSEST_ULL(swoh_avg, num_events) : 0;

	seq_printf(s, "count:%llu\n", count);
	seq_printf(s, "fai1:%llu\n", stats->fail_count);
	seq_printf(s, "t_max:%lld\n", proc_max);
	seq_printf(s, "t_min:%lld\n", proc_min);
	seq_printf(s, "t_avg:%lld\n", proc_avg);
	seq_printf(s, "swoh_max:%lld\n", swoh_max);
	seq_printf(s, "swoh_min:%lld\n", swoh_min);
	seq_printf(s, "swoh_avg:%lld\n", swoh_avg);

	return 0;
}
  692. /*
  693. * sde_rotator_raw_show - Show raw statistics on read from this debugfs file
  694. * @s: Pointer to sequence file structure
  695. * @data: Pointer to private data structure
  696. */
  697. static int sde_rotator_raw_show(struct seq_file *s, void *data)
  698. {
  699. int i, j, offset;
  700. struct sde_rotator_device *rot_dev = s->private;
  701. struct sde_rotator_statistics *stats = &rot_dev->stats;
  702. u64 count = stats->count;
  703. int num_events;
  704. if (count > SDE_ROTATOR_NUM_EVENTS) {
  705. num_events = SDE_ROTATOR_NUM_EVENTS;
  706. offset = count % SDE_ROTATOR_NUM_EVENTS;
  707. } else {
  708. num_events = count;
  709. offset = 0;
  710. }
  711. for (i = 0; i < num_events; i++) {
  712. int k = (offset + i) % SDE_ROTATOR_NUM_EVENTS;
  713. ktime_t *ts = stats->ts[k];
  714. seq_printf(s, "%d ", i);
  715. for (j = 0; j < SDE_ROTATOR_NUM_TIMESTAMPS; j++)
  716. seq_printf(s, "%lld ", ktime_to_us(ts[j]));
  717. seq_puts(s, "\n");
  718. }
  719. return 0;
  720. }
/*
 * sde_rotator_stat_open - Processed statistics debugfs file open function
 * @inode: debugfs inode carrying the rotator device as private data
 * @file: file handler
 */
static int sde_rotator_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, sde_rotator_stat_show, inode->i_private);
}
/*
 * sde_rotator_raw_open - Raw statistics debugfs file open function
 * @inode: debugfs inode carrying the rotator device as private data
 * @file: file handler
 */
static int sde_rotator_raw_open(struct inode *inode, struct file *file)
{
	return single_open(file, sde_rotator_raw_show, inode->i_private);
}
/*
 * sde_rotator_base_create_debugfs - create debugfs nodes for base driver data
 * @mdata: Pointer to rotator global data
 * @debugfs_root: Pointer to parent debugfs node
 *
 * Return: 0 on success, -EINVAL if the clk_always_on node fails.
 */
static int sde_rotator_base_create_debugfs(
		struct sde_rot_data_type *mdata,
		struct dentry *debugfs_root)
{
	debugfs_create_u32("iommu_ref_cnt", 0444, debugfs_root, &mdata->iommu_ref_cnt);

	mdata->clk_always_on = false;
	/*
	 * NOTE(review): this checks debugfs_create_bool()'s return value
	 * while debugfs_create_u32() above is unchecked; on kernels >= 5.8
	 * debugfs_create_bool() returns void and this would not compile --
	 * confirm target kernel version and unify the error handling.
	 */
	if (!debugfs_create_bool("clk_always_on", 0644,
			debugfs_root, &mdata->clk_always_on)) {
		SDEROT_WARN("failed to create debugfs clk_always_on\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * sde_rotator_core_create_debugfs - create debugfs nodes for the core manager
 * @mgr: Pointer to rotator manager structure
 * @debugfs_root: Pointer to parent debugfs node
 *
 * Also delegates to the hardware-specific debugfs hook when one is
 * registered.
 *
 * Return: 0 on success, -EINVAL if enable_bw_vote creation fails, or the
 * error returned by the hardware hook.
 */
static int sde_rotator_core_create_debugfs(
		struct sde_rot_mgr *mgr,
		struct dentry *debugfs_root)
{
	int ret;

	debugfs_create_u32("hwacquire_timeout", 0400, debugfs_root, &mgr->hwacquire_timeout);
	/*
	 * NOTE(review): ppc_numer is 0644 but ppc_denom is 0600 -- looks
	 * unintentional; confirm whether both should share one mode.
	 */
	debugfs_create_u32("ppc_numer", 0644, debugfs_root, &mgr->pixel_per_clk.numer);
	debugfs_create_u32("ppc_denom", 0600, debugfs_root, &mgr->pixel_per_clk.denom);

	/*
	 * NOTE(review): only this node's return value is checked; on
	 * kernels where debugfs_create_u64() returns void this would not
	 * compile -- confirm target kernel version.
	 */
	if (!debugfs_create_u64("enable_bw_vote", 0644,
			debugfs_root, &mgr->enable_bw_vote)) {
		SDEROT_WARN("failed to create enable_bw_vote\n");
		return -EINVAL;
	}

	if (mgr->ops_hw_create_debugfs) {
		ret = mgr->ops_hw_create_debugfs(mgr, debugfs_root);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * struct sde_rot_evtlog_fops - event log "dump" node file operations
 */
static const struct file_operations sde_rot_evtlog_fops = {
	.open = sde_rot_evtlog_dump_open,
	.read = sde_rot_evtlog_dump_read,
};
  786. static int sde_rotator_evtlog_create_debugfs(
  787. struct sde_rot_mgr *mgr,
  788. struct dentry *debugfs_root)
  789. {
  790. int i;
  791. sde_rot_dbg_evtlog.evtlog = debugfs_create_dir("evtlog", debugfs_root);
  792. if (IS_ERR_OR_NULL(sde_rot_dbg_evtlog.evtlog)) {
  793. pr_err("debugfs_create_dir fail, error %ld\n",
  794. PTR_ERR(sde_rot_dbg_evtlog.evtlog));
  795. sde_rot_dbg_evtlog.evtlog = NULL;
  796. return -ENODEV;
  797. }
  798. INIT_WORK(&sde_rot_dbg_evtlog.evtlog_dump_work,
  799. sde_rot_evtlog_debug_work);
  800. sde_rot_dbg_evtlog.work_panic = false;
  801. for (i = 0; i < SDE_ROT_EVTLOG_ENTRY; i++)
  802. sde_rot_dbg_evtlog.logs[i].counter = i;
  803. debugfs_create_file("dump", 0644, sde_rot_dbg_evtlog.evtlog, NULL,
  804. &sde_rot_evtlog_fops);
  805. debugfs_create_u32("enable", 0644, sde_rot_dbg_evtlog.evtlog,
  806. &sde_rot_dbg_evtlog.evtlog_enable);
  807. debugfs_create_u32("panic", 0644, sde_rot_dbg_evtlog.evtlog,
  808. &sde_rot_dbg_evtlog.panic_on_err);
  809. debugfs_create_u32("reg_dump", 0644, sde_rot_dbg_evtlog.evtlog,
  810. &sde_rot_dbg_evtlog.enable_reg_dump);
  811. debugfs_create_u32("vbif_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
  812. &sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump);
  813. debugfs_create_u32("rot_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
  814. &sde_rot_dbg_evtlog.enable_rot_dbgbus_dump);
  815. sde_rot_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE;
  816. sde_rot_dbg_evtlog.panic_on_err = SDE_EVTLOG_DEFAULT_PANIC;
  817. sde_rot_dbg_evtlog.enable_reg_dump = SDE_EVTLOG_DEFAULT_REGDUMP;
  818. sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump =
  819. SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP;
  820. sde_rot_dbg_evtlog.enable_rot_dbgbus_dump =
  821. SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP;
  822. pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
  823. sde_rot_dbg_evtlog.evtlog_enable,
  824. sde_rot_dbg_evtlog.panic_on_err,
  825. sde_rot_dbg_evtlog.enable_reg_dump);
  826. return 0;
  827. }
/*
 * struct sde_rotator_stat_ops - processed statistics file operations
 *
 * seq_file-backed read-only node; single_open() pairs with
 * single_release().
 */
static const struct file_operations sde_rotator_stat_ops = {
	.open = sde_rotator_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
/*
 * struct sde_rotator_raw_ops - raw statistics file operations
 *
 * seq_file-backed read-only node; single_open() pairs with
 * single_release().
 */
static const struct file_operations sde_rotator_raw_ops = {
	.open = sde_rotator_raw_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
  846. static int sde_rotator_debug_base_open(struct inode *inode, struct file *file)
  847. {
  848. /* non-seekable */
  849. file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
  850. file->private_data = inode->i_private;
  851. return 0;
  852. }
  853. static int sde_rotator_debug_base_release(struct inode *inode,
  854. struct file *file)
  855. {
  856. struct sde_rotator_debug_base *dbg = file->private_data;
  857. if (dbg) {
  858. mutex_lock(&dbg->buflock);
  859. kfree(dbg->buf);
  860. dbg->buf_len = 0;
  861. dbg->buf = NULL;
  862. mutex_unlock(&dbg->buflock);
  863. }
  864. return 0;
  865. }
/*
 * sde_rotator_debug_base_offset_write - set the register window to dump
 * @file: file whose private_data carries the debug-base context
 * @user_buf: userspace buffer, expected to hold "<offset> <count>" in hex
 * @count: number of bytes in @user_buf
 * @ppos: file position (unused; node is non-seekable)
 *
 * Parses a hex offset (at most 5 digits) and byte count, validates
 * word alignment and range against max_offset, clamps the count to the
 * remaining window, and stores both under buflock for the next read.
 *
 * Returns @count on success, -ENODEV without a context, -EFAULT on an
 * oversized or uncopyable input (oversize arguably merits -EINVAL, but
 * this matches the write handler below), or -EINVAL on a parse or
 * range failure.
 */
static ssize_t sde_rotator_debug_base_offset_write(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	u32 off = 0;
	u32 cnt = SDE_ROT_DEFAULT_BASE_REG_CNT;
	char buf[24];

	if (!dbg)
		return -ENODEV;

	/* reserve one byte for the NUL terminator */
	if (count >= sizeof(buf))
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	buf[count] = 0;	/* end of string */

	/* both fields are required; "%5x" bounds the offset to 20 bits */
	if (sscanf(buf, "%5x %x", &off, &cnt) < 2)
		return -EINVAL;

	/* registers are word-sized; offset must be u32-aligned */
	if (off % sizeof(u32))
		return -EINVAL;

	if (off > dbg->max_offset)
		return -EINVAL;

	/* clamp the dump length to the end of the register space */
	if (cnt > (dbg->max_offset - off))
		cnt = dbg->max_offset - off;

	mutex_lock(&dbg->buflock);
	dbg->off = off;
	dbg->cnt = cnt;
	mutex_unlock(&dbg->buflock);

	SDEROT_DBG("offset=%x cnt=%x\n", off, cnt);

	return count;
}
/*
 * sde_rotator_debug_base_offset_read - report the current register window
 * @file: file whose private_data carries the debug-base context
 * @buff: userspace destination buffer
 * @count: size of @buff
 * @ppos: file position; nonzero means the value was already delivered
 *
 * Formats "0x<offset> <count>\n" under buflock and copies it to
 * userspace in a single shot.
 *
 * Returns the number of bytes copied, 0 at EOF or on a formatting
 * failure, -ENODEV without a context, or -EFAULT on copy failure.
 *
 * NOTE(review): the copy is refused unless @count can hold the whole
 * 24-byte buffer, not just @len bytes — a quirk, but preserved.
 */
static ssize_t sde_rotator_debug_base_offset_read(struct file *file,
			char __user *buff, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	int len = 0;
	char buf[24] = {'\0'};

	if (!dbg)
		return -ENODEV;

	if (*ppos)
		return 0;	/* the end */

	mutex_lock(&dbg->buflock);
	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
	mutex_unlock(&dbg->buflock);

	if (len < 0 || len >= sizeof(buf))
		return 0;

	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
		return -EFAULT;

	*ppos += len;	/* increase offset */

	return len;
}
/*
 * sde_rotator_debug_base_reg_write - write one 32-bit rotator register
 * @file: file whose private_data carries the debug-base context
 * @user_buf: userspace buffer, expected to hold "<offset> <value>" in hex
 * @count: number of bytes in @user_buf
 * @ppos: file position (unused; node is non-seekable)
 *
 * Parses a word-aligned offset and a value, then performs the register
 * write with the manager lock held and the rotator clock briefly
 * enabled.
 *
 * Lock ordering: buflock is taken before the manager lock — the read
 * handler below uses the same order.
 *
 * Returns @count on success, 0 if resource control is not enabled (the
 * write is silently dropped), -ENODEV without a context, or -EFAULT on
 * a copy, parse, alignment, or range failure.
 */
static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	size_t off;
	u32 data, cnt;
	char buf[24];

	if (!dbg)
		return -ENODEV;

	/* reserve one byte for the NUL terminator */
	if (count >= sizeof(buf))
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	buf[count] = 0;	/* end of string */

	cnt = sscanf(buf, "%zx %x", &off, &data);
	if (cnt < 2)
		return -EFAULT;

	/* register writes must be u32-aligned and inside the map */
	if (off % sizeof(u32))
		return -EFAULT;

	if (off >= dbg->max_offset)
		return -EFAULT;

	mutex_lock(&dbg->buflock);

	/* Enable Clock for register access */
	sde_rot_mgr_lock(dbg->mgr);
	if (!sde_rotator_resource_ctrl_enabled(dbg->mgr)) {
		SDEROT_WARN("resource ctrl is not enabled\n");
		sde_rot_mgr_unlock(dbg->mgr);
		goto debug_write_error;
	}
	sde_rotator_clk_ctrl(dbg->mgr, true);

	writel_relaxed(data, dbg->base + off);

	/* Disable Clock after register access */
	sde_rotator_clk_ctrl(dbg->mgr, false);
	sde_rot_mgr_unlock(dbg->mgr);

	mutex_unlock(&dbg->buflock);

	SDEROT_DBG("addr=%zx data=%x\n", off, data);

	return count;

debug_write_error:
	mutex_unlock(&dbg->buflock);
	return 0;
}
/*
 * sde_rotator_debug_base_reg_read - read back the selected register window
 * @file: file whose private_data carries the debug-base context
 * @user_buf: userspace destination buffer
 * @count: size of @user_buf
 * @ppos: file position within the formatted dump
 *
 * On the first read (dbg->buf == NULL) builds a hex dump of dbg->cnt
 * bytes starting at dbg->off — with the manager lock held and the
 * rotator clock enabled — into a lazily allocated buffer; subsequent
 * reads page out of that buffer until EOF. The buffer is freed on
 * release or when a new window is selected.
 *
 * Returns the number of bytes copied, 0 at EOF, or a negative errno.
 *
 * NOTE(review): if resource ctrl is disabled we bail out after the
 * buffer was already allocated and buf_len set, so a later read would
 * serve the zero-filled buffer rather than real register contents —
 * confirm whether that path should also free dbg->buf.
 */
static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
			char __user *user_buf, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	size_t len;
	int rc = 0;

	if (!dbg) {
		SDEROT_ERR("invalid handle\n");
		return -ENODEV;
	}

	mutex_lock(&dbg->buflock);

	/* lazily build the formatted dump on the first read */
	if (!dbg->buf) {
		char dump_buf[64];
		char *ptr;
		int cnt, tot;

		/* one 64-byte text line per ROW_BYTES of register data */
		dbg->buf_len = sizeof(dump_buf) *
			DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
		dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);

		if (!dbg->buf) {
			SDEROT_ERR("not enough memory to hold reg dump\n");
			rc = -ENOMEM;
			goto debug_read_error;
		}

		if (dbg->off % sizeof(u32)) {
			rc = -EFAULT;
			goto debug_read_error;
		}

		ptr = dbg->base + dbg->off;
		tot = 0;

		/* Enable clock for register access */
		sde_rot_mgr_lock(dbg->mgr);
		if (!sde_rotator_resource_ctrl_enabled(dbg->mgr)) {
			SDEROT_WARN("resource ctrl is not enabled\n");
			sde_rot_mgr_unlock(dbg->mgr);
			goto debug_read_error;
		}
		sde_rotator_clk_ctrl(dbg->mgr, true);

		for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
			hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
					   ROW_BYTES, GROUP_BYTES, dump_buf,
					   sizeof(dump_buf), false);
			/* prefix each row with its offset from base */
			len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
					"0x%08x: %s\n",
					((int) (unsigned long) ptr) -
					((int) (unsigned long) dbg->base),
					dump_buf);

			ptr += ROW_BYTES;
			tot += len;
			if (tot >= dbg->buf_len)
				break;
		}
		/* Disable clock after register access */
		sde_rotator_clk_ctrl(dbg->mgr, false);
		sde_rot_mgr_unlock(dbg->mgr);

		/* shrink the logical length to what was actually written */
		dbg->buf_len = tot;
	}

	if (*ppos >= dbg->buf_len) {
		rc = 0;	/* done reading */
		goto debug_read_error;
	}

	len = min(count, dbg->buf_len - (size_t) *ppos);
	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
		SDEROT_ERR("failed to copy to user\n");
		rc = -EFAULT;
		goto debug_read_error;
	}

	*ppos += len;	/* increase offset */

	mutex_unlock(&dbg->buflock);
	return len;

debug_read_error:
	mutex_unlock(&dbg->buflock);
	return rc;
}
/*
 * struct sde_rotator_off_fops - "off" node file operations; selects and
 * reports the register window (offset/count) used by the "reg" node.
 */
static const struct file_operations sde_rotator_off_fops = {
	.open = sde_rotator_debug_base_open,
	.release = sde_rotator_debug_base_release,
	.read = sde_rotator_debug_base_offset_read,
	.write = sde_rotator_debug_base_offset_write,
};
/*
 * struct sde_rotator_reg_fops - "reg" node file operations; dumps or
 * writes rotator registers within the window chosen via the "off" node.
 */
static const struct file_operations sde_rotator_reg_fops = {
	.open = sde_rotator_debug_base_open,
	.release = sde_rotator_debug_base_release,
	.read = sde_rotator_debug_base_reg_read,
	.write = sde_rotator_debug_base_reg_write,
};
/*
 * sde_rotator_create_debugfs - Setup rotator debugfs directory structure.
 * @rot_dev: Pointer to rotator device
 *
 * Creates a per-device "<drvname><id>" debugfs directory holding the
 * stats/raw seq_file nodes, several timeout/tuning u32 knobs, and the
 * base/core/evtlog sub-node groups. On any failure the whole tree is
 * torn down.
 *
 * Returns the root dentry on success, or NULL on failure. The caller
 * owns the dentry and must release it via sde_rotator_destroy_debugfs().
 */
struct dentry *sde_rotator_create_debugfs(
		struct sde_rotator_device *rot_dev)
{
	struct dentry *debugfs_root;
	char dirname[32] = {0};

	snprintf(dirname, sizeof(dirname), "%s%d",
			SDE_ROTATOR_DRV_NAME, rot_dev->dev->id);
	debugfs_root = debugfs_create_dir(dirname, NULL);
	if (!debugfs_root) {
		SDEROT_ERR("fail create debugfs root\n");
		return NULL;
	}

	if (!debugfs_create_file("stats", 0400,
		debugfs_root, rot_dev, &sde_rotator_stat_ops)) {
		SDEROT_ERR("fail create debugfs stats\n");
		debugfs_remove_recursive(debugfs_root);
		return NULL;
	}

	if (!debugfs_create_file("raw", 0400,
		debugfs_root, rot_dev, &sde_rotator_raw_ops)) {
		SDEROT_ERR("fail create debugfs raw\n");
		debugfs_remove_recursive(debugfs_root);
		return NULL;
	}

	/*
	 * NOTE(review): these u32 nodes are created unchecked while the
	 * file nodes above are checked — mixed use of the debugfs API;
	 * confirm the target kernel's return conventions.
	 */
	debugfs_create_u32("fence_timeout", 0400, debugfs_root,
			&rot_dev->fence_timeout);
	debugfs_create_u32("open_timeout", 0400, debugfs_root,
			&rot_dev->open_timeout);
	debugfs_create_u32("disable_syscache", 0400, debugfs_root,
			&rot_dev->disable_syscache);
	debugfs_create_u32("streamoff_timeout", 0400, debugfs_root,
			&rot_dev->streamoff_timeout);
	debugfs_create_u32("early_submit", 0400, debugfs_root,
			&rot_dev->early_submit);

	if (sde_rotator_base_create_debugfs(rot_dev->mdata, debugfs_root)) {
		SDEROT_ERR("fail create base debugfs\n");
		debugfs_remove_recursive(debugfs_root);
		return NULL;
	}

	if (sde_rotator_core_create_debugfs(rot_dev->mgr, debugfs_root)) {
		SDEROT_ERR("fail create core debugfs\n");
		debugfs_remove_recursive(debugfs_root);
		return NULL;
	}

	if (sde_rotator_evtlog_create_debugfs(rot_dev->mgr, debugfs_root)) {
		SDEROT_ERR("fail create evtlog debugfs\n");
		debugfs_remove_recursive(debugfs_root);
		return NULL;
	}

	return debugfs_root;
}
/*
 * sde_rotator_destroy_debugfs - Destroy rotator debugfs directory structure.
 * @debugfs: Root dentry returned by sde_rotator_create_debugfs()
 *
 * Recursively removes the whole per-device debugfs tree; safe to call
 * with NULL (debugfs_remove_recursive tolerates it).
 */
void sde_rotator_destroy_debugfs(struct dentry *debugfs)
{
	debugfs_remove_recursive(debugfs);
}
  1099. #endif