  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "%s: " fmt, __func__
  7. #include <linux/types.h>
  8. #include <linux/kernel.h>
  9. #include <linux/slab.h>
  10. #include <linux/uaccess.h>
  11. #include <linux/debugfs.h>
  12. #include "sde_rotator_debug.h"
  13. #include "sde_rotator_base.h"
  14. #include "sde_rotator_core.h"
  15. #include "sde_rotator_dev.h"
  16. #include "sde_rotator_trace.h"
  17. #if IS_ENABLED(CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG)
  18. #define SDE_EVTLOG_DEFAULT_ENABLE 1
  19. #else
  20. #define SDE_EVTLOG_DEFAULT_ENABLE 0
  21. #endif /* CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG */
  22. #define SDE_EVTLOG_DEFAULT_PANIC 1
  23. #define SDE_EVTLOG_DEFAULT_REGDUMP SDE_ROT_DBG_DUMP_IN_MEM
  24. #define SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
  25. #define SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP SDE_ROT_DBG_DUMP_IN_MEM
  26. /*
  27. * evtlog will print this number of entries when it is called through
  28. * sysfs node or panic. This prevents kernel log from evtlog message
  29. * flood.
  30. */
  31. #define SDE_ROT_EVTLOG_PRINT_ENTRY 256
  32. /*
  33. * evtlog keeps this number of entries in memory for debug purpose. This
  34. * number must be greater than print entry to prevent out of bound evtlog
  35. * entry array access.
  36. */
  37. #define SDE_ROT_EVTLOG_ENTRY (SDE_ROT_EVTLOG_PRINT_ENTRY * 4)
  38. #define SDE_ROT_EVTLOG_MAX_DATA 15
  39. #define SDE_ROT_EVTLOG_BUF_MAX 512
  40. #define SDE_ROT_EVTLOG_BUF_ALIGN 32
  41. #define SDE_ROT_DEBUG_BASE_MAX 10
  42. #define SDE_ROT_DEFAULT_BASE_REG_CNT 0x100
  43. #define GROUP_BYTES 4
  44. #define ROW_BYTES 16
  45. #define SDE_ROT_TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
  46. #if defined(CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG) && \
  47. defined(CONFIG_DEBUG_FS)
  48. static DEFINE_SPINLOCK(sde_rot_xlock);
/*
 * struct tlog - single EVTLOG entry
 * @counter: slot index assigned once at debugfs init time
 * @time: entry timestamp in microseconds (ktime_to_us of ktime_get)
 * @name: name of the function that logged the entry
 * @line: line number within the logging function
 * @data: logged data words
 * @data_cnt: number of valid words in @data
 * @pid: pid of the thread that logged the entry
 */
struct tlog {
	u32 counter;
	s64 time;
	const char *name;
	int line;
	u32 data[SDE_ROT_EVTLOG_MAX_DATA];
	u32 data_cnt;
	int pid;
};
/*
 * struct sde_rot_dbg_evtlog - EVTLOG debug data structure
 * @logs: EVTLOG entry ring buffer
 * @first: first entry index of the current dump window
 * @last: producer index (monotonically increasing; wraps modulo
 *        SDE_ROT_EVTLOG_ENTRY when consumed)
 * @curr: ring slot the next entry will be written to
 * @evtlog: EVTLOG debugfs directory handle
 * @evtlog_enable: bitmask of enabled EVTLOG categories (0 = disabled)
 * @panic_on_err: boolean indicates issue panic after EVTLOG dump
 * @enable_reg_dump: control in-log/memory dump for rotator registers
 * @enable_vbif_dbgbus_dump: control in-log/memory dump for VBIF debug bus
 * @enable_rot_dbgbus_dump: control in-log/memory dump for rotator debug bus
 * @evtlog_dump_work: work item used by the timeout handler to defer dumps
 * @work_dump_reg: storage for register dump control in scheduled work
 * @work_panic: storage for panic control in scheduled work
 * @work_vbif_dbgbus: storage for VBIF debug bus control in scheduled work
 * @work_rot_dbgbus: storage for rotator debug bus control in scheduled work
 * @nrt_vbif_dbgbus_dump: memory buffer for VBIF debug bus dumping
 * @rot_dbgbus_dump: memory buffer for rotator debug bus dumping
 * @reg_dump_array: per-range memory buffers for rotator register dumping
 *
 * A single file-scope instance is defined below; entries are protected
 * by sde_rot_xlock.
 */
struct sde_rot_dbg_evtlog {
	struct tlog logs[SDE_ROT_EVTLOG_ENTRY];
	u32 first;
	u32 last;
	u32 curr;
	struct dentry *evtlog;
	u32 evtlog_enable;
	u32 panic_on_err;
	u32 enable_reg_dump;
	u32 enable_vbif_dbgbus_dump;
	u32 enable_rot_dbgbus_dump;
	struct work_struct evtlog_dump_work;
	bool work_dump_reg;
	bool work_panic;
	bool work_vbif_dbgbus;
	bool work_rot_dbgbus;
	u32 *nrt_vbif_dbgbus_dump; /* address for the nrt vbif debug bus dump */
	u32 *rot_dbgbus_dump;
	u32 *reg_dump_array[SDE_ROT_DEBUG_BASE_MAX];
} sde_rot_dbg_evtlog;
  109. static void sde_rot_dump_debug_bus(u32 bus_dump_flag, u32 **dump_mem)
  110. {
  111. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  112. bool in_log, in_mem;
  113. u32 *dump_addr = NULL;
  114. u32 status = 0;
  115. struct sde_rot_debug_bus *head;
  116. int i;
  117. u32 offset;
  118. void __iomem *base;
  119. in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
  120. in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
  121. base = mdata->sde_io.base;
  122. if (!base || !mdata->rot_dbg_bus || !mdata->rot_dbg_bus_size)
  123. return;
  124. pr_info("======== SDE Rotator Debug bus DUMP =========\n");
  125. if (in_mem) {
  126. if (!(*dump_mem))
  127. *dump_mem = devm_kzalloc(&mdata->pdev->dev,
  128. mdata->rot_dbg_bus_size * 4 * sizeof(u32),
  129. GFP_KERNEL);
  130. if (*dump_mem) {
  131. dump_addr = *dump_mem;
  132. pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
  133. __func__, dump_addr,
  134. dump_addr + (u32)mdata->rot_dbg_bus_size * 16);
  135. } else {
  136. in_mem = false;
  137. pr_err("dump_mem: allocation fails\n");
  138. }
  139. }
  140. sde_smmu_ctrl(1);
  141. for (i = 0; i < mdata->rot_dbg_bus_size; i++) {
  142. head = mdata->rot_dbg_bus + i;
  143. writel_relaxed(SDE_ROT_TEST_MASK(head->block_id, head->test_id),
  144. base + head->wr_addr);
  145. wmb(); /* make sure test bits were written */
  146. offset = head->wr_addr + 0x4;
  147. status = readl_relaxed(base + offset);
  148. if (in_log)
  149. pr_err("waddr=0x%x blk=%d tst=%d val=0x%x\n",
  150. head->wr_addr, head->block_id, head->test_id,
  151. status);
  152. if (dump_addr && in_mem) {
  153. dump_addr[i*4] = head->wr_addr;
  154. dump_addr[i*4 + 1] = head->block_id;
  155. dump_addr[i*4 + 2] = head->test_id;
  156. dump_addr[i*4 + 3] = status;
  157. }
  158. /* Disable debug bus once we are done */
  159. writel_relaxed(0, base + head->wr_addr);
  160. }
  161. sde_smmu_ctrl(0);
  162. pr_info("========End Debug bus=========\n");
  163. }
  164. /*
  165. * sde_rot_evtlog_is_enabled - helper function for checking EVTLOG
  166. * enable/disable
  167. * @flag - EVTLOG option flag
  168. */
  169. static inline bool sde_rot_evtlog_is_enabled(u32 flag)
  170. {
  171. return (flag & sde_rot_dbg_evtlog.evtlog_enable) ||
  172. (flag == SDE_ROT_EVTLOG_ALL &&
  173. sde_rot_dbg_evtlog.evtlog_enable);
  174. }
/*
 * __vbif_debug_bus - helper function for VBIF debug bus dump
 * @head - VBIF debug bus data structure
 * @vbif_base - VBIF IO mapped address
 * @dump_addr - output buffer for memory dump option (NULL to skip)
 * @in_log - boolean indicates in-log dump option
 *
 * For each block/test-point pair, selects the point on the debug bus and
 * samples MMSS_VBIF_TEST_BUS_OUT. Four u32 words are written per test
 * point: bus address, block index, test point index, sampled value.
 */
static void __vbif_debug_bus(struct sde_rot_vbif_debug_bus *head,
	void __iomem *vbif_base, u32 *dump_addr, bool in_log)
{
	int i, j;
	u32 val;

	/* nothing to do if neither output destination is requested */
	if (!dump_addr && !in_log)
		return;

	for (i = 0; i < head->block_cnt; i++) {
		writel_relaxed(1 << (i + head->bit_offset),
				vbif_base + head->block_bus_addr);
		/* make sure the current bus block enable is written */
		wmb();
		for (j = 0; j < head->test_pnt_cnt; j++) {
			/* test point select register is 4 bytes after block select */
			writel_relaxed(j, vbif_base + head->block_bus_addr + 4);
			/* make sure that test point is enabled */
			wmb();
			val = readl_relaxed(vbif_base + MMSS_VBIF_TEST_BUS_OUT);
			if (dump_addr) {
				*dump_addr++ = head->block_bus_addr;
				*dump_addr++ = i;
				*dump_addr++ = j;
				*dump_addr++ = val;
			}
			if (in_log)
				pr_err("testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
					head->block_bus_addr, i, j, val);
		}
	}
}
  211. /*
  212. * sde_rot_dump_vbif_debug_bus - VBIF debug bus dump
  213. * @bus_dump_flag - dump flag controlling in-log/memory dump option
  214. * @dump_mem - output buffer for memory dump location
  215. */
  216. static void sde_rot_dump_vbif_debug_bus(u32 bus_dump_flag,
  217. u32 **dump_mem)
  218. {
  219. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  220. bool in_log, in_mem;
  221. u32 *dump_addr = NULL;
  222. u32 value;
  223. struct sde_rot_vbif_debug_bus *head;
  224. int i, list_size = 0;
  225. void __iomem *vbif_base;
  226. struct sde_rot_vbif_debug_bus *dbg_bus;
  227. u32 bus_size;
  228. pr_info("======== NRT VBIF Debug bus DUMP =========\n");
  229. vbif_base = mdata->vbif_nrt_io.base;
  230. dbg_bus = mdata->nrt_vbif_dbg_bus;
  231. bus_size = mdata->nrt_vbif_dbg_bus_size;
  232. if (!vbif_base || !dbg_bus || !bus_size)
  233. return;
  234. /* allocate memory for each test point */
  235. for (i = 0; i < bus_size; i++) {
  236. head = dbg_bus + i;
  237. list_size += (head->block_cnt * head->test_pnt_cnt);
  238. }
  239. /* 4 bytes * 4 entries for each test point*/
  240. list_size *= 16;
  241. in_log = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
  242. in_mem = (bus_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
  243. if (in_mem) {
  244. if (!(*dump_mem))
  245. *dump_mem = devm_kzalloc(&mdata->pdev->dev, list_size,
  246. GFP_KERNEL);
  247. if (*dump_mem) {
  248. dump_addr = *dump_mem;
  249. pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
  250. __func__, dump_addr, dump_addr + list_size);
  251. } else {
  252. in_mem = false;
  253. pr_err("dump_mem: allocation fails\n");
  254. }
  255. }
  256. sde_smmu_ctrl(1);
  257. value = readl_relaxed(vbif_base + MMSS_VBIF_CLKON);
  258. writel_relaxed(value | BIT(1), vbif_base + MMSS_VBIF_CLKON);
  259. /* make sure that vbif core is on */
  260. wmb();
  261. for (i = 0; i < bus_size; i++) {
  262. head = dbg_bus + i;
  263. writel_relaxed(0, vbif_base + head->disable_bus_addr);
  264. writel_relaxed(BIT(0), vbif_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
  265. /* make sure that other bus is off */
  266. wmb();
  267. __vbif_debug_bus(head, vbif_base, dump_addr, in_log);
  268. if (dump_addr)
  269. dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
  270. }
  271. sde_smmu_ctrl(0);
  272. pr_info("========End VBIF Debug bus=========\n");
  273. }
  274. /*
  275. * sde_rot_dump_reg - helper function for dumping rotator register set content
  276. * @dump_name - register set name
  277. * @reg_dump_flag - dumping flag controlling in-log/memory dump location
  278. * @access - access type, sde registers or vbif registers
  279. * @addr - starting address offset for dumping
  280. * @len - range of the register set
  281. * @dump_mem - output buffer for memory dump location option
  282. */
  283. void sde_rot_dump_reg(const char *dump_name, u32 reg_dump_flag,
  284. enum sde_rot_regdump_access access, u32 addr,
  285. int len, u32 **dump_mem)
  286. {
  287. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  288. bool in_log, in_mem;
  289. u32 *dump_addr = NULL;
  290. int i;
  291. void __iomem *base;
  292. in_log = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_LOG);
  293. in_mem = (reg_dump_flag & SDE_ROT_DBG_DUMP_IN_MEM);
  294. pr_debug("reg_dump_flag=%d in_log=%d in_mem=%d\n",
  295. reg_dump_flag, in_log, in_mem);
  296. if (len % 16)
  297. len += 16;
  298. len /= 16;
  299. if (in_mem) {
  300. if (!(*dump_mem))
  301. *dump_mem = devm_kzalloc(&mdata->pdev->dev, len * 16,
  302. GFP_KERNEL);
  303. if (*dump_mem) {
  304. dump_addr = *dump_mem;
  305. pr_info("%s: start_addr:0x%pK end_addr:0x%pK reg_addr=0x%X\n",
  306. dump_name, dump_addr, dump_addr + (u32)len * 16,
  307. addr);
  308. } else {
  309. in_mem = false;
  310. pr_err("dump_mem: kzalloc fails!\n");
  311. }
  312. }
  313. base = mdata->sde_io.base;
  314. /*
  315. * VBIF NRT base handling
  316. */
  317. if (access == SDE_ROT_REGDUMP_VBIF)
  318. base = mdata->vbif_nrt_io.base;
  319. for (i = 0; i < len; i++) {
  320. u32 x0, x4, x8, xc;
  321. x0 = readl_relaxed(base + addr+0x0);
  322. x4 = readl_relaxed(base + addr+0x4);
  323. x8 = readl_relaxed(base + addr+0x8);
  324. xc = readl_relaxed(base + addr+0xc);
  325. if (in_log)
  326. pr_info("0x%08X : %08x %08x %08x %08x\n",
  327. addr, x0, x4, x8, xc);
  328. if (dump_addr && in_mem) {
  329. dump_addr[i*4] = x0;
  330. dump_addr[i*4 + 1] = x4;
  331. dump_addr[i*4 + 2] = x8;
  332. dump_addr[i*4 + 3] = xc;
  333. }
  334. addr += 16;
  335. }
  336. }
/*
 * sde_rot_dump_reg_all - dump all SDE rotator register ranges
 *
 * Walks the platform-provided regdump table (mdata->regdump). An entry is
 * either a one-shot register write (SDE_ROT_REGDUMP_WRITE, used to enable
 * debug features before dumping) or a register range dumped through
 * sde_rot_dump_reg(). Rotator clocks are held on for the whole walk via
 * sde_smmu_ctrl().
 */
static void sde_rot_dump_reg_all(void)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_rot_regdump *head, *regdump;
	u32 regdump_size;
	int i;

	regdump = mdata->regdump;
	regdump_size = mdata->regdump_size;

	if (!regdump || !regdump_size)
		return;

	/* Enable clock to rotator if not yet enabled */
	sde_smmu_ctrl(1);

	/* capped at SDE_ROT_DEBUG_BASE_MAX to stay within reg_dump_array[] */
	for (i = 0; (i < regdump_size) && (i < SDE_ROT_DEBUG_BASE_MAX); i++) {
		head = &regdump[i];

		if (head->access == SDE_ROT_REGDUMP_WRITE) {
			/* a write entry must cover exactly one register */
			if (head->len != 1) {
				SDEROT_ERR("invalid write len %u\n", head->len);
				continue;
			}
			writel_relaxed(head->value,
					mdata->sde_io.base + head->offset);
			/* Make sure write go through */
			wmb();
		} else {
			sde_rot_dump_reg(head->name,
					sde_rot_dbg_evtlog.enable_reg_dump,
					head->access,
					head->offset, head->len,
					&sde_rot_dbg_evtlog.reg_dump_array[i]);
		}
	}

	/* Disable rotator clock */
	sde_smmu_ctrl(0);
}
/*
 * __sde_rot_evtlog_dump_calc_range - advance the EVTLOG dump window
 *
 * Sets 'first' to the next entry to dump (tracked by the static cursor
 * 'next') and normalizes it against the producer index 'last'. When the
 * producer has run more than SDE_ROT_EVTLOG_PRINT_ENTRY entries ahead,
 * the window is clamped and the overflow is reported.
 *
 * Return: true if there is an entry to dump, false when caught up
 */
static bool __sde_rot_evtlog_dump_calc_range(void)
{
	static u32 next;	/* dump cursor, persists across calls */
	bool need_dump = true;
	unsigned long flags;
	struct sde_rot_dbg_evtlog *evtlog = &sde_rot_dbg_evtlog;

	spin_lock_irqsave(&sde_rot_xlock, flags);

	evtlog->first = next;

	if (evtlog->last == evtlog->first) {
		/* nothing new since the previous dump */
		need_dump = false;
		goto dump_exit;
	}

	if (evtlog->last < evtlog->first) {
		/* indices wrapped; normalize so that last >= first */
		evtlog->first %= SDE_ROT_EVTLOG_ENTRY;
		if (evtlog->last < evtlog->first)
			evtlog->last += SDE_ROT_EVTLOG_ENTRY;
	}

	if ((evtlog->last - evtlog->first) > SDE_ROT_EVTLOG_PRINT_ENTRY) {
		pr_warn("evtlog buffer overflow before dump: %d\n",
			evtlog->last - evtlog->first);
		evtlog->first = evtlog->last - SDE_ROT_EVTLOG_PRINT_ENTRY;
	}
	next = evtlog->first + 1;

dump_exit:
	spin_unlock_irqrestore(&sde_rot_xlock, flags);

	return need_dump;
}
  404. /*
  405. * sde_rot_evtlog_dump_entry - helper function for EVTLOG content dumping
  406. * @evtlog_buf: EVTLOG dump output buffer
  407. * @evtlog_buf_size: EVTLOG output buffer size
  408. */
  409. static ssize_t sde_rot_evtlog_dump_entry(char *evtlog_buf,
  410. ssize_t evtlog_buf_size)
  411. {
  412. int i;
  413. ssize_t off = 0;
  414. struct tlog *log, *prev_log;
  415. unsigned long flags;
  416. spin_lock_irqsave(&sde_rot_xlock, flags);
  417. log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.first %
  418. SDE_ROT_EVTLOG_ENTRY];
  419. prev_log = &sde_rot_dbg_evtlog.logs[(sde_rot_dbg_evtlog.first - 1) %
  420. SDE_ROT_EVTLOG_ENTRY];
  421. off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
  422. log->name, log->line);
  423. if (off < SDE_ROT_EVTLOG_BUF_ALIGN) {
  424. memset((evtlog_buf + off), 0x20,
  425. (SDE_ROT_EVTLOG_BUF_ALIGN - off));
  426. off = SDE_ROT_EVTLOG_BUF_ALIGN;
  427. }
  428. off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
  429. "=>[%-8d:%-11llu:%9llu][%-4d]:", sde_rot_dbg_evtlog.first,
  430. log->time, (log->time - prev_log->time), log->pid);
  431. for (i = 0; i < log->data_cnt; i++)
  432. off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
  433. "%x ", log->data[i]);
  434. off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
  435. spin_unlock_irqrestore(&sde_rot_xlock, flags);
  436. return off;
  437. }
  438. /*
  439. * sde_rot_evtlog_dump_all - Dumping all content in EVTLOG buffer
  440. */
  441. static void sde_rot_evtlog_dump_all(void)
  442. {
  443. char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX];
  444. while (__sde_rot_evtlog_dump_calc_range()) {
  445. sde_rot_evtlog_dump_entry(evtlog_buf, SDE_ROT_EVTLOG_BUF_MAX);
  446. pr_info("%s\n", evtlog_buf);
  447. }
  448. }
  449. /*
  450. * sde_rot_evtlog_dump_open - debugfs open handler for evtlog dump
  451. * @inode: debugfs inode
  452. * @file: file handler
  453. */
  454. static int sde_rot_evtlog_dump_open(struct inode *inode, struct file *file)
  455. {
  456. /* non-seekable */
  457. file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
  458. file->private_data = inode->i_private;
  459. return 0;
  460. }
/*
 * sde_rot_evtlog_dump_read - debugfs read handler for evtlog dump
 * @file: file handler
 * @buff: user buffer content for debugfs
 * @count: size of user buffer
 * @ppos: position offset of user buffer
 *
 * Emits one formatted EVTLOG entry per read() call, returning 0 (EOF)
 * once the dump cursor catches up with the newest entry. An entry that
 * does not fit in the user buffer is dropped with an error message.
 */
static ssize_t sde_rot_evtlog_dump_read(struct file *file, char __user *buff,
		size_t count, loff_t *ppos)
{
	ssize_t len = 0;
	char evtlog_buf[SDE_ROT_EVTLOG_BUF_MAX];

	if (__sde_rot_evtlog_dump_calc_range()) {
		len = sde_rot_evtlog_dump_entry(evtlog_buf,
				SDE_ROT_EVTLOG_BUF_MAX);
		/* each entry must fit in the user buffer in one read */
		if (len < 0 || len > count) {
			pr_err("len is more than the user buffer size\n");
			return 0;
		}

		if (copy_to_user(buff, evtlog_buf, len))
			return -EFAULT;
		*ppos += len;
	}
	return len;
}
/*
 * sde_rot_evtlog_dump_helper - helper function for evtlog dump
 * @dead: boolean indicates panic after dump
 * @panic_name: Panic signature name show up in log
 * @dump_rot: boolean indicates rotator register dump
 * @dump_vbif_debug_bus: boolean indicates VBIF debug bus dump
 * @dump_rot_debug_bus: boolean indicates rotator debug bus dump
 *
 * Dump order is: evtlog entries, rotator debug bus, VBIF debug bus,
 * then rotator registers, optionally followed by panic().
 */
static void sde_rot_evtlog_dump_helper(bool dead, const char *panic_name,
	bool dump_rot, bool dump_vbif_debug_bus, bool dump_rot_debug_bus)
{
	sde_rot_evtlog_dump_all();

	if (dump_rot_debug_bus)
		sde_rot_dump_debug_bus(
				sde_rot_dbg_evtlog.enable_rot_dbgbus_dump,
				&sde_rot_dbg_evtlog.rot_dbgbus_dump);

	if (dump_vbif_debug_bus)
		sde_rot_dump_vbif_debug_bus(
				sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump,
				&sde_rot_dbg_evtlog.nrt_vbif_dbgbus_dump);

	/*
	 * Rotator registers always dump last
	 */
	if (dump_rot)
		sde_rot_dump_reg_all();

	if (dead)
		panic(panic_name);
}
  513. /*
  514. * sde_rot_evtlog_debug_work - schedule work function for evtlog dump
  515. * @work: schedule work structure
  516. */
  517. static void sde_rot_evtlog_debug_work(struct work_struct *work)
  518. {
  519. sde_rot_evtlog_dump_helper(
  520. sde_rot_dbg_evtlog.work_panic,
  521. "evtlog_workitem",
  522. sde_rot_dbg_evtlog.work_dump_reg,
  523. sde_rot_dbg_evtlog.work_vbif_dbgbus,
  524. sde_rot_dbg_evtlog.work_rot_dbgbus);
  525. }
  526. #if defined(CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 * sde_rot_evtlog_tout_handler - log dump timeout handler
 * @queue: boolean indicate putting log dump into queue
 * @name: function name having timeout
 *
 * Trailing variadic arguments are block-name strings terminated by a
 * NULL/error pointer. Recognized names select dump targets: "rot"
 * (rotator registers), "vbif_dbg_bus", "rot_dbg_bus", and "panic"
 * (panic once dumping completes). When @queue is true the dump is
 * deferred to a work item (skipped if one is already pending);
 * otherwise it runs synchronously in the caller's context.
 */
void sde_rot_evtlog_tout_handler(bool queue, const char *name, ...)
{
	int i;
	bool dead = false;
	bool dump_rot = false;
	bool dump_vbif_dbgbus = false;
	bool dump_rot_dbgbus = false;
	char *blk_name = NULL;
	va_list args;

	if (!sde_rot_evtlog_is_enabled(SDE_ROT_EVTLOG_DEFAULT))
		return;

	if (queue && work_pending(&sde_rot_dbg_evtlog.evtlog_dump_work))
		return;

	va_start(args, name);
	/* scan at most SDE_ROT_EVTLOG_MAX_DATA block names */
	for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) {
		blk_name = va_arg(args, char*);
		if (IS_ERR_OR_NULL(blk_name))
			break;

		if (!strcmp(blk_name, "rot"))
			dump_rot = true;

		if (!strcmp(blk_name, "vbif_dbg_bus"))
			dump_vbif_dbgbus = true;

		if (!strcmp(blk_name, "rot_dbg_bus"))
			dump_rot_dbgbus = true;

		if (!strcmp(blk_name, "panic"))
			dead = true;
	}
	va_end(args);

	if (queue) {
		/* schedule work to dump later */
		sde_rot_dbg_evtlog.work_panic = dead;
		sde_rot_dbg_evtlog.work_dump_reg = dump_rot;
		sde_rot_dbg_evtlog.work_vbif_dbgbus = dump_vbif_dbgbus;
		sde_rot_dbg_evtlog.work_rot_dbgbus = dump_rot_dbgbus;
		schedule_work(&sde_rot_dbg_evtlog.evtlog_dump_work);
	} else {
		sde_rot_evtlog_dump_helper(dead, name, dump_rot,
			dump_vbif_dbgbus, dump_rot_dbgbus);
	}
}
/*
 * sde_rot_evtlog - log contents into memory for dump analysis
 * @name: Name of function calling evtlog
 * @line: line number of calling function
 * @flag: Log control flag
 *
 * Variadic arguments are int data words terminated by
 * SDE_ROT_DATA_LIMITER; at most SDE_ROT_EVTLOG_MAX_DATA words are kept.
 * The entry is written into the global ring buffer under sde_rot_xlock
 * and mirrored to the sde_rot_evtlog tracepoint.
 */
void sde_rot_evtlog(const char *name, int line, int flag, ...)
{
	unsigned long flags;
	int i, val = 0;
	va_list args;
	struct tlog *log;

	if (!sde_rot_evtlog_is_enabled(flag))
		return;

	spin_lock_irqsave(&sde_rot_xlock, flags);
	log = &sde_rot_dbg_evtlog.logs[sde_rot_dbg_evtlog.curr];
	log->time = ktime_to_us(ktime_get());
	log->name = name;
	log->line = line;
	log->data_cnt = 0;
	log->pid = current->pid;

	va_start(args, flag);
	for (i = 0; i < SDE_ROT_EVTLOG_MAX_DATA; i++) {
		val = va_arg(args, int);
		if (val == SDE_ROT_DATA_LIMITER)
			break;

		log->data[i] = val;
	}
	va_end(args);
	/* i is the number of words actually stored */
	log->data_cnt = i;
	/* advance ring producer state */
	sde_rot_dbg_evtlog.curr =
		(sde_rot_dbg_evtlog.curr + 1) % SDE_ROT_EVTLOG_ENTRY;
	sde_rot_dbg_evtlog.last++;

	trace_sde_rot_evtlog(name, line, log->data_cnt, log->data);

	spin_unlock_irqrestore(&sde_rot_xlock, flags);
}
  608. #endif
/*
 * sde_rotator_stat_show - Show statistics on read to this debugfs file
 * @s: Pointer to sequence file structure
 * @data: Pointer to private data structure
 *
 * Prints per-event stage-to-stage latencies (in microseconds) for up to
 * SDE_ROTATOR_NUM_EVENTS most recent events, followed by summary
 * min/max/average of processing time and software overhead.
 */
static int sde_rotator_stat_show(struct seq_file *s, void *data)
{
	int i, offset;
	struct sde_rotator_device *rot_dev = s->private;
	struct sde_rotator_statistics *stats = &rot_dev->stats;
	u64 count = stats->count;
	int num_events;
	s64 proc_max, proc_min, proc_avg;
	s64 swoh_max, swoh_min, swoh_avg;

	proc_max = 0;
	proc_min = S64_MAX;
	proc_avg = 0;
	swoh_max = 0;
	swoh_min = S64_MAX;
	swoh_avg = 0;

	/* ts[] is a ring; once full, start at the oldest slot */
	if (count > SDE_ROTATOR_NUM_EVENTS) {
		num_events = SDE_ROTATOR_NUM_EVENTS;
		offset = count % SDE_ROTATOR_NUM_EVENTS;
	} else {
		num_events = count;
		offset = 0;
	}

	for (i = 0; i < num_events; i++) {
		int k = (offset + i) % SDE_ROTATOR_NUM_EVENTS;
		ktime_t *ts = stats->ts[k];
		/* processing starts at the earlier of src/dst queue-buffer */
		ktime_t start_time =
			ktime_before(ts[SDE_ROTATOR_TS_SRCQB],
					ts[SDE_ROTATOR_TS_DSTQB]) ?
					ts[SDE_ROTATOR_TS_SRCQB] :
					ts[SDE_ROTATOR_TS_DSTQB];
		s64 proc_time =
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
					start_time));
		s64 sw_overhead_time =
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
					start_time));

		seq_printf(s,
			"s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld st:%lld fl:%lld d:%lld sdq:%lld ddq:%lld t:%lld oht:%lld\n",
			i,
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
					ts[SDE_ROTATOR_TS_SRCQB])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
					ts[SDE_ROTATOR_TS_DSTQB])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_QUEUE],
					ts[SDE_ROTATOR_TS_FENCE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_COMMIT],
					ts[SDE_ROTATOR_TS_QUEUE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_START],
					ts[SDE_ROTATOR_TS_COMMIT])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
					ts[SDE_ROTATOR_TS_START])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
					ts[SDE_ROTATOR_TS_FLUSH])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
					ts[SDE_ROTATOR_TS_DONE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_SRCDQB],
					ts[SDE_ROTATOR_TS_RETIRE])),
			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DSTDQB],
					ts[SDE_ROTATOR_TS_RETIRE])),
			proc_time, sw_overhead_time);

		proc_max = max(proc_max, proc_time);
		proc_min = min(proc_min, proc_time);
		proc_avg += proc_time;

		swoh_max = max(swoh_max, sw_overhead_time);
		swoh_min = min(swoh_min, sw_overhead_time);
		swoh_avg += sw_overhead_time;
	}

	proc_avg = (num_events) ?
			DIV_ROUND_CLOSEST_ULL(proc_avg, num_events) : 0;
	swoh_avg = (num_events) ?
			DIV_ROUND_CLOSEST_ULL(swoh_avg, num_events) : 0;

	seq_printf(s, "count:%llu\n", count);
	seq_printf(s, "fai1:%llu\n", stats->fail_count);
	seq_printf(s, "t_max:%lld\n", proc_max);
	seq_printf(s, "t_min:%lld\n", proc_min);
	seq_printf(s, "t_avg:%lld\n", proc_avg);
	seq_printf(s, "swoh_max:%lld\n", swoh_max);
	seq_printf(s, "swoh_min:%lld\n", swoh_min);
	seq_printf(s, "swoh_avg:%lld\n", swoh_avg);

	return 0;
}
  695. /*
  696. * sde_rotator_raw_show - Show raw statistics on read from this debugfs file
  697. * @s: Pointer to sequence file structure
  698. * @data: Pointer to private data structure
  699. */
  700. static int sde_rotator_raw_show(struct seq_file *s, void *data)
  701. {
  702. int i, j, offset;
  703. struct sde_rotator_device *rot_dev = s->private;
  704. struct sde_rotator_statistics *stats = &rot_dev->stats;
  705. u64 count = stats->count;
  706. int num_events;
  707. if (count > SDE_ROTATOR_NUM_EVENTS) {
  708. num_events = SDE_ROTATOR_NUM_EVENTS;
  709. offset = count % SDE_ROTATOR_NUM_EVENTS;
  710. } else {
  711. num_events = count;
  712. offset = 0;
  713. }
  714. for (i = 0; i < num_events; i++) {
  715. int k = (offset + i) % SDE_ROTATOR_NUM_EVENTS;
  716. ktime_t *ts = stats->ts[k];
  717. seq_printf(s, "%d ", i);
  718. for (j = 0; j < SDE_ROTATOR_NUM_TIMESTAMPS; j++)
  719. seq_printf(s, "%lld ", ktime_to_us(ts[j]));
  720. seq_puts(s, "\n");
  721. }
  722. return 0;
  723. }
/*
 * sde_rotator_stat_open - Processed statistics debugfs file open function
 * @inode: debugfs inode carrying the rotator device in i_private
 * @file: file handle
 */
static int sde_rotator_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, sde_rotator_stat_show, inode->i_private);
}
/*
 * sde_rotator_raw_open - Raw statistics debugfs file open function
 * @inode: debugfs inode carrying the rotator device in i_private
 * @file: file handle
 */
static int sde_rotator_raw_open(struct inode *inode, struct file *file)
{
	return single_open(file, sde_rotator_raw_show, inode->i_private);
}
/*
 * sde_rotator_base_create_debugfs - create debugfs nodes for base driver data
 * @mdata: Pointer to rotator global data
 * @debugfs_root: Pointer to parent debugfs node
 *
 * Return: always 0 (debugfs_create_* helpers do not report errors)
 */
static int sde_rotator_base_create_debugfs(
		struct sde_rot_data_type *mdata,
		struct dentry *debugfs_root)
{
	debugfs_create_u32("iommu_ref_cnt", 0444, debugfs_root, &mdata->iommu_ref_cnt);

	/* reset the knob to off before exposing it through debugfs */
	mdata->clk_always_on = false;
	debugfs_create_bool("clk_always_on", 0644, debugfs_root, &mdata->clk_always_on);

	return 0;
}
  756. /*
  757. * sde_rotator_dbg_open - Raw statistics debugfs file open function
  758. * @mgr: Pointer to rotator manager structure
  759. * @debugfs_root: Pointer to parent debugfs node
  760. */
  761. static int sde_rotator_core_create_debugfs(
  762. struct sde_rot_mgr *mgr,
  763. struct dentry *debugfs_root)
  764. {
  765. int ret;
  766. debugfs_create_u32("hwacquire_timeout", 0400, debugfs_root, &mgr->hwacquire_timeout);
  767. debugfs_create_u32("ppc_numer", 0644, debugfs_root, &mgr->pixel_per_clk.numer);
  768. debugfs_create_u32("ppc_denom", 0600, debugfs_root, &mgr->pixel_per_clk.denom);
  769. debugfs_create_u64("enable_bw_vote", 0644, debugfs_root, &mgr->enable_bw_vote);
  770. if (mgr->ops_hw_create_debugfs) {
  771. ret = mgr->ops_hw_create_debugfs(mgr, debugfs_root);
  772. if (ret)
  773. return ret;
  774. }
  775. return 0;
  776. }
/*
 * struct sde_rot_evtlog_fops - event log "dump" debugfs file operations
 */
static const struct file_operations sde_rot_evtlog_fops = {
	.open = sde_rot_evtlog_dump_open,
	.read = sde_rot_evtlog_dump_read,
};
/*
 * sde_rotator_evtlog_create_debugfs - Initialize the event log and expose
 * its controls under an "evtlog" debugfs directory.
 * @mgr: Pointer to rotator manager structure (unused here)
 * @debugfs_root: Pointer to parent debugfs node
 *
 * Return: 0 on success, -ENODEV if the directory cannot be created.
 */
static int sde_rotator_evtlog_create_debugfs(
		struct sde_rot_mgr *mgr,
		struct dentry *debugfs_root)
{
	int i;

	sde_rot_dbg_evtlog.evtlog = debugfs_create_dir("evtlog", debugfs_root);
	if (IS_ERR_OR_NULL(sde_rot_dbg_evtlog.evtlog)) {
		pr_err("debugfs_create_dir fail, error %ld\n",
				PTR_ERR(sde_rot_dbg_evtlog.evtlog));
		sde_rot_dbg_evtlog.evtlog = NULL;
		return -ENODEV;
	}

	INIT_WORK(&sde_rot_dbg_evtlog.evtlog_dump_work,
			sde_rot_evtlog_debug_work);
	sde_rot_dbg_evtlog.work_panic = false;

	/* Seed each log slot with its own index as the initial counter. */
	for (i = 0; i < SDE_ROT_EVTLOG_ENTRY; i++)
		sde_rot_dbg_evtlog.logs[i].counter = i;

	debugfs_create_file("dump", 0644, sde_rot_dbg_evtlog.evtlog, NULL,
			&sde_rot_evtlog_fops);
	debugfs_create_u32("enable", 0644, sde_rot_dbg_evtlog.evtlog,
			&sde_rot_dbg_evtlog.evtlog_enable);
	debugfs_create_u32("panic", 0644, sde_rot_dbg_evtlog.evtlog,
			&sde_rot_dbg_evtlog.panic_on_err);
	debugfs_create_u32("reg_dump", 0644, sde_rot_dbg_evtlog.evtlog,
			&sde_rot_dbg_evtlog.enable_reg_dump);
	debugfs_create_u32("vbif_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
			&sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump);
	debugfs_create_u32("rot_dbgbus_dump", 0644, sde_rot_dbg_evtlog.evtlog,
			&sde_rot_dbg_evtlog.enable_rot_dbgbus_dump);

	/* Apply compile-time defaults after the nodes exist. */
	sde_rot_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE;
	sde_rot_dbg_evtlog.panic_on_err = SDE_EVTLOG_DEFAULT_PANIC;
	sde_rot_dbg_evtlog.enable_reg_dump = SDE_EVTLOG_DEFAULT_REGDUMP;
	sde_rot_dbg_evtlog.enable_vbif_dbgbus_dump =
		SDE_EVTLOG_DEFAULT_VBIF_DBGBUSDUMP;
	sde_rot_dbg_evtlog.enable_rot_dbgbus_dump =
		SDE_EVTLOG_DEFAULT_ROT_DBGBUSDUMP;

	pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
			sde_rot_dbg_evtlog.evtlog_enable,
			sde_rot_dbg_evtlog.panic_on_err,
			sde_rot_dbg_evtlog.enable_reg_dump);

	return 0;
}
/*
 * struct sde_rotator_stat_ops - processed statistics file operations
 *
 * seq_file-backed; sde_rotator_stat_open binds sde_rotator_stat_show.
 */
static const struct file_operations sde_rotator_stat_ops = {
	.open = sde_rotator_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
/*
 * struct sde_rotator_raw_ops - raw statistics file operations
 *
 * seq_file-backed; sde_rotator_raw_open binds sde_rotator_raw_show.
 */
static const struct file_operations sde_rotator_raw_ops = {
	.open = sde_rotator_raw_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};
/*
 * sde_rotator_debug_base_open - Open handler shared by the register-access
 * debugfs nodes; stashes the debug base in the file's private data.
 * @inode: Pointer to inode carrying the debug base as private data
 * @file: Pointer to file being opened
 */
static int sde_rotator_debug_base_open(struct inode *inode, struct file *file)
{
	/* non-seekable */
	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
	file->private_data = inode->i_private;
	return 0;
}
  848. static int sde_rotator_debug_base_release(struct inode *inode,
  849. struct file *file)
  850. {
  851. struct sde_rotator_debug_base *dbg = file->private_data;
  852. if (dbg) {
  853. mutex_lock(&dbg->buflock);
  854. kfree(dbg->buf);
  855. dbg->buf_len = 0;
  856. dbg->buf = NULL;
  857. mutex_unlock(&dbg->buflock);
  858. }
  859. return 0;
  860. }
/*
 * sde_rotator_debug_base_offset_write - Parse "<offset> <count>" (hex) from
 * userspace and store the register window to dump on the next read.
 * @file: Pointer to file
 * @user_buf: Pointer to user buffer containing the two hex values
 * @count: Number of bytes in user buffer
 * @ppos: Pointer to file position (unused)
 *
 * Return: @count on success, negative errno on parse/range failure.
 */
static ssize_t sde_rotator_debug_base_offset_write(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	u32 off = 0;
	u32 cnt = SDE_ROT_DEFAULT_BASE_REG_CNT;
	char buf[24];

	if (!dbg)
		return -ENODEV;

	/* Reserve one byte for the NUL terminator below. */
	if (count >= sizeof(buf))
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	buf[count] = 0;

	/* Both offset and count are required; offset capped at 5 hex digits. */
	if (sscanf(buf, "%5x %x", &off, &cnt) < 2)
		return -EINVAL;

	/* Offset must be register (u32) aligned and within the map. */
	if (off % sizeof(u32))
		return -EINVAL;

	if (off > dbg->max_offset)
		return -EINVAL;

	/* Clamp the window so it never reads past the register map. */
	if (cnt > (dbg->max_offset - off))
		cnt = dbg->max_offset - off;

	mutex_lock(&dbg->buflock);
	dbg->off = off;
	dbg->cnt = cnt;
	mutex_unlock(&dbg->buflock);

	SDEROT_DBG("offset=%x cnt=%x\n", off, cnt);

	return count;
}
/*
 * sde_rotator_debug_base_offset_read - Report the currently configured
 * register window as "0x<offset> <count>\n".
 * @file: Pointer to file
 * @buff: Pointer to user buffer to fill
 * @count: Size of user buffer
 * @ppos: Pointer to file position; nonzero means the line was already read
 *
 * Return: Number of bytes copied, 0 at EOF, or negative errno.
 */
static ssize_t sde_rotator_debug_base_offset_read(struct file *file,
		char __user *buff, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	int len = 0;
	char buf[24] = {'\0'};

	if (!dbg)
		return -ENODEV;

	if (*ppos)
		return 0;	/* the end */

	mutex_lock(&dbg->buflock);
	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
	mutex_unlock(&dbg->buflock);

	if (len < 0 || len >= sizeof(buf))
		return 0;

	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
		return -EFAULT;

	*ppos += len;	/* increase offset */

	return len;
}
/*
 * sde_rotator_debug_base_reg_write - Parse "<offset> <value>" (hex) from
 * userspace and write the value to that rotator register.
 * @file: Pointer to file
 * @user_buf: Pointer to user buffer containing the two hex values
 * @count: Number of bytes in user buffer
 * @ppos: Pointer to file position (unused)
 *
 * Return: @count on success, 0 if resource control is disabled,
 * negative errno on parse/range failure.
 */
static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
		const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	size_t off;
	u32 data, cnt;
	char buf[24];

	if (!dbg)
		return -ENODEV;

	/* Reserve one byte for the NUL terminator below. */
	if (count >= sizeof(buf))
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	buf[count] = 0;

	cnt = sscanf(buf, "%zx %x", &off, &data);

	if (cnt < 2)
		return -EFAULT;

	/* Offset must be register (u32) aligned and within the map. */
	if (off % sizeof(u32))
		return -EFAULT;

	if (off >= dbg->max_offset)
		return -EFAULT;

	mutex_lock(&dbg->buflock);

	/* Enable Clock for register access */
	sde_rot_mgr_lock(dbg->mgr);
	if (!sde_rotator_resource_ctrl_enabled(dbg->mgr)) {
		SDEROT_WARN("resource ctrl is not enabled\n");
		sde_rot_mgr_unlock(dbg->mgr);
		goto debug_write_error;
	}
	sde_rotator_clk_ctrl(dbg->mgr, true);

	writel_relaxed(data, dbg->base + off);

	/* Disable Clock after register access */
	sde_rotator_clk_ctrl(dbg->mgr, false);
	sde_rot_mgr_unlock(dbg->mgr);

	mutex_unlock(&dbg->buflock);

	SDEROT_DBG("addr=%zx data=%x\n", off, data);

	return count;

debug_write_error:
	mutex_unlock(&dbg->buflock);
	return 0;
}
/*
 * sde_rotator_debug_base_reg_read - Dump the configured register window as
 * hex text. The dump is rendered once into dbg->buf on the first read and
 * then served from that cache until the file is released.
 * @file: Pointer to file
 * @user_buf: Pointer to user buffer to fill
 * @count: Size of user buffer
 * @ppos: Pointer to file position
 *
 * Return: Number of bytes copied, 0 at EOF, or negative errno.
 */
static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
		char __user *user_buf, size_t count, loff_t *ppos)
{
	struct sde_rotator_debug_base *dbg = file->private_data;
	size_t len;
	int rc = 0;

	if (!dbg) {
		SDEROT_ERR("invalid handle\n");
		return -ENODEV;
	}

	mutex_lock(&dbg->buflock);

	/* First read: render the whole window into a cached text buffer. */
	if (!dbg->buf) {
		char dump_buf[64];
		char *ptr;
		int cnt, tot;

		/* One dump_buf-sized text line per ROW_BYTES of registers. */
		dbg->buf_len = sizeof(dump_buf) *
			DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
		dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);

		if (!dbg->buf) {
			SDEROT_ERR("not enough memory to hold reg dump\n");
			rc = -ENOMEM;
			goto debug_read_error;
		}

		if (dbg->off % sizeof(u32)) {
			rc = -EFAULT;
			goto debug_read_error;
		}

		ptr = dbg->base + dbg->off;
		tot = 0;

		/* Enable clock for register access */
		sde_rot_mgr_lock(dbg->mgr);
		if (!sde_rotator_resource_ctrl_enabled(dbg->mgr)) {
			SDEROT_WARN("resource ctrl is not enabled\n");
			sde_rot_mgr_unlock(dbg->mgr);
			goto debug_read_error;
		}
		sde_rotator_clk_ctrl(dbg->mgr, true);

		for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
			hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
					   ROW_BYTES, GROUP_BYTES, dump_buf,
					   sizeof(dump_buf), false);
			/* Prefix each row with its offset from the base. */
			len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
					"0x%08x: %s\n",
					((int) (unsigned long) ptr) -
					((int) (unsigned long) dbg->base),
					dump_buf);

			ptr += ROW_BYTES;
			tot += len;
			if (tot >= dbg->buf_len)
				break;
		}
		/* Disable clock after register access */
		sde_rotator_clk_ctrl(dbg->mgr, false);
		sde_rot_mgr_unlock(dbg->mgr);

		/* Shrink to the bytes actually rendered. */
		dbg->buf_len = tot;
	}

	if (*ppos >= dbg->buf_len) {
		rc = 0;	/* done reading */
		goto debug_read_error;
	}

	len = min(count, dbg->buf_len - (size_t) *ppos);
	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
		SDEROT_ERR("failed to copy to user\n");
		rc = -EFAULT;
		goto debug_read_error;
	}

	*ppos += len;	/* increase offset */

	mutex_unlock(&dbg->buflock);
	return len;

debug_read_error:
	mutex_unlock(&dbg->buflock);
	return rc;
}
/*
 * struct sde_rotator_off_fops - register window (offset/count) file operations
 */
static const struct file_operations sde_rotator_off_fops = {
	.open = sde_rotator_debug_base_open,
	.release = sde_rotator_debug_base_release,
	.read = sde_rotator_debug_base_offset_read,
	.write = sde_rotator_debug_base_offset_write,
};
/*
 * struct sde_rotator_reg_fops - register dump/poke file operations
 */
static const struct file_operations sde_rotator_reg_fops = {
	.open = sde_rotator_debug_base_open,
	.release = sde_rotator_debug_base_release,
	.read = sde_rotator_debug_base_reg_read,
	.write = sde_rotator_debug_base_reg_write,
};
  1036. /*
  1037. * sde_rotator_create_debugfs - Setup rotator debugfs directory structure.
  1038. * @rot_dev: Pointer to rotator device
  1039. */
  1040. struct dentry *sde_rotator_create_debugfs(
  1041. struct sde_rotator_device *rot_dev)
  1042. {
  1043. struct dentry *debugfs_root;
  1044. char dirname[32] = {0};
  1045. snprintf(dirname, sizeof(dirname), "%s%d",
  1046. SDE_ROTATOR_DRV_NAME, rot_dev->dev->id);
  1047. debugfs_root = debugfs_create_dir(dirname, NULL);
  1048. if (!debugfs_root) {
  1049. SDEROT_ERR("fail create debugfs root\n");
  1050. return NULL;
  1051. }
  1052. if (!debugfs_create_file("stats", 0400,
  1053. debugfs_root, rot_dev, &sde_rotator_stat_ops)) {
  1054. SDEROT_ERR("fail create debugfs stats\n");
  1055. debugfs_remove_recursive(debugfs_root);
  1056. return NULL;
  1057. }
  1058. if (!debugfs_create_file("raw", 0400,
  1059. debugfs_root, rot_dev, &sde_rotator_raw_ops)) {
  1060. SDEROT_ERR("fail create debugfs raw\n");
  1061. debugfs_remove_recursive(debugfs_root);
  1062. return NULL;
  1063. }
  1064. debugfs_create_u32("fence_timeout", 0400, debugfs_root, &rot_dev->fence_timeout);
  1065. debugfs_create_u32("open_timeout", 0400, debugfs_root, &rot_dev->open_timeout);
  1066. debugfs_create_u32("disable_syscache", 0400, debugfs_root, &rot_dev->disable_syscache);
  1067. debugfs_create_u32("streamoff_timeout", 0400, debugfs_root, &rot_dev->streamoff_timeout);
  1068. debugfs_create_u32("early_submit", 0400, debugfs_root, &rot_dev->early_submit);
  1069. if (sde_rotator_base_create_debugfs(rot_dev->mdata, debugfs_root)) {
  1070. SDEROT_ERR("fail create base debugfs\n");
  1071. debugfs_remove_recursive(debugfs_root);
  1072. return NULL;
  1073. }
  1074. if (sde_rotator_core_create_debugfs(rot_dev->mgr, debugfs_root)) {
  1075. SDEROT_ERR("fail create core debugfs\n");
  1076. debugfs_remove_recursive(debugfs_root);
  1077. return NULL;
  1078. }
  1079. if (sde_rotator_evtlog_create_debugfs(rot_dev->mgr, debugfs_root)) {
  1080. SDEROT_ERR("fail create evtlog debugfs\n");
  1081. debugfs_remove_recursive(debugfs_root);
  1082. return NULL;
  1083. }
  1084. return debugfs_root;
  1085. }
/*
 * sde_rotator_destroy_debugfs - Destroy rotator debugfs directory structure.
 * @debugfs: Pointer to rotator debugfs root node
 */
void sde_rotator_destroy_debugfs(struct dentry *debugfs)
{
	debugfs_remove_recursive(debugfs);
}
  1094. #endif