// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <[email protected]>
 */

#include <linux/debugfs.h>
#include <linux/bitfield.h>

#include "dw-edma-v0-debugfs.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-core.h"

#define REGS_ADDR(name) \
	((void __force *)&regs->name)

#define REGISTER(name) \
	{ #name, REGS_ADDR(name) }

#define WR_REGISTER(name) \
	{ #name, REGS_ADDR(wr_##name) }
#define RD_REGISTER(name) \
	{ #name, REGS_ADDR(rd_##name) }

#define WR_REGISTER_LEGACY(name) \
	{ #name, REGS_ADDR(type.legacy.wr_##name) }
#define RD_REGISTER_LEGACY(name) \
	{ #name, REGS_ADDR(type.legacy.rd_##name) }

#define WR_REGISTER_UNROLL(name) \
	{ #name, REGS_ADDR(type.unroll.wr_##name) }
#define RD_REGISTER_UNROLL(name) \
	{ #name, REGS_ADDR(type.unroll.rd_##name) }

#define WRITE_STR				"write"
#define READ_STR				"read"
#define CHANNEL_STR				"channel"
#define REGISTERS_STR				"registers"

static struct dw_edma				*dw;
static struct dw_edma_v0_regs __iomem		*regs;

static struct {
	void __iomem	*start;
	void __iomem	*end;
} lim[2][EDMA_V0_MAX_NR_CH];

struct debugfs_entries {
	const char				*name;
	dma_addr_t				*reg;
};
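
/*
 * Read callback for the register files.  On the legacy map, per-channel
 * registers sit behind a shared viewport: the matching channel is looked up
 * in lim[], selected through viewport_sel under dw->lock, and then read.
 * Unrolled and global registers are read directly.
 */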
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
	void __iomem *reg = (void __force __iomem *)data;

	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
	    reg >= (void __iomem *)&regs->type.legacy.ch) {
		void __iomem *ptr = &regs->type.legacy.ch;
		u32 viewport_sel = 0;
		unsigned long flags;
		u16 ch;

		for (ch = 0; ch < dw->wr_ch_cnt; ch++)
			if (reg >= lim[0][ch].start && reg < lim[0][ch].end) {
				ptr += (reg - lim[0][ch].start);
				goto legacy_sel_wr;
			}

		for (ch = 0; ch < dw->rd_ch_cnt; ch++)
			if (reg >= lim[1][ch].start && reg < lim[1][ch].end) {
				ptr += (reg - lim[1][ch].start);
				goto legacy_sel_rd;
			}

		return 0;
legacy_sel_rd:
		viewport_sel = BIT(31);
legacy_sel_wr:
		viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);

		raw_spin_lock_irqsave(&dw->lock, flags);

		writel(viewport_sel, &regs->type.legacy.viewport_sel);
		*val = readl(ptr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		*val = readl(reg);
	}

	return 0;
}
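
/*
 * All register files share one read-only attribute that prints the 32-bit
 * value in hex; dw_edma_debugfs_create_x32() instantiates one debugfs file
 * per entry of a register table.
 */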
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n");

static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
				       int nr_entries, struct dentry *dir)
{
	int i;

	for (i = 0; i < nr_entries; i++) {
		if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
						entries[i].reg, &fops_x32))
			break;
	}
}
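
/* Per-channel register files: control, transfer size, SAR/DAR and LLP. */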
static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
				    struct dentry *dir)
{
	int nr_entries;
	const struct debugfs_entries debugfs_regs[] = {
		REGISTER(ch_control1),
		REGISTER(ch_control2),
		REGISTER(transfer_size),
		REGISTER(sar.lsb),
		REGISTER(sar.msb),
		REGISTER(dar.lsb),
		REGISTER(dar.msb),
		REGISTER(llp.lsb),
		REGISTER(llp.msb),
	};

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);
}
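
/*
 * "write" direction: global write-engine registers, write interrupt
 * registers, the unrolled per-group registers in HDMA-compat mode, and one
 * sub-directory per write channel.  The channel windows are recorded in
 * lim[0] so the legacy viewport read above can translate them.
 */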
static void dw_edma_debugfs_regs_wr(struct dentry *dir)
{
	const struct debugfs_entries debugfs_regs[] = {
		/* eDMA global registers */
		WR_REGISTER(engine_en),
		WR_REGISTER(doorbell),
		WR_REGISTER(ch_arb_weight.lsb),
		WR_REGISTER(ch_arb_weight.msb),
		/* eDMA interrupts registers */
		WR_REGISTER(int_status),
		WR_REGISTER(int_mask),
		WR_REGISTER(int_clear),
		WR_REGISTER(err_status),
		WR_REGISTER(done_imwr.lsb),
		WR_REGISTER(done_imwr.msb),
		WR_REGISTER(abort_imwr.lsb),
		WR_REGISTER(abort_imwr.msb),
		WR_REGISTER(ch01_imwr_data),
		WR_REGISTER(ch23_imwr_data),
		WR_REGISTER(ch45_imwr_data),
		WR_REGISTER(ch67_imwr_data),
		WR_REGISTER(linked_list_err_en),
	};
	const struct debugfs_entries debugfs_unroll_regs[] = {
		/* eDMA channel context grouping */
		WR_REGISTER_UNROLL(engine_chgroup),
		WR_REGISTER_UNROLL(engine_hshake_cnt.lsb),
		WR_REGISTER_UNROLL(engine_hshake_cnt.msb),
		WR_REGISTER_UNROLL(ch0_pwr_en),
		WR_REGISTER_UNROLL(ch1_pwr_en),
		WR_REGISTER_UNROLL(ch2_pwr_en),
		WR_REGISTER_UNROLL(ch3_pwr_en),
		WR_REGISTER_UNROLL(ch4_pwr_en),
		WR_REGISTER_UNROLL(ch5_pwr_en),
		WR_REGISTER_UNROLL(ch6_pwr_en),
		WR_REGISTER_UNROLL(ch7_pwr_en),
	};
	struct dentry *regs_dir, *ch_dir;
	int nr_entries, i;
	char name[16];

	regs_dir = debugfs_create_dir(WRITE_STR, dir);
	if (!regs_dir)
		return;

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);

	if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
					   regs_dir);
	}

	for (i = 0; i < dw->wr_ch_cnt; i++) {
		snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);

		ch_dir = debugfs_create_dir(name, regs_dir);
		if (!ch_dir)
			return;

		dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir);

		lim[0][i].start = &regs->type.unroll.ch[i].wr;
		lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0];
	}
}
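
/*
 * "read" direction: mirrors the write side with the RD_* register set and
 * records the channel windows in lim[1].
 */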
static void dw_edma_debugfs_regs_rd(struct dentry *dir)
{
	const struct debugfs_entries debugfs_regs[] = {
		/* eDMA global registers */
		RD_REGISTER(engine_en),
		RD_REGISTER(doorbell),
		RD_REGISTER(ch_arb_weight.lsb),
		RD_REGISTER(ch_arb_weight.msb),
		/* eDMA interrupts registers */
		RD_REGISTER(int_status),
		RD_REGISTER(int_mask),
		RD_REGISTER(int_clear),
		RD_REGISTER(err_status.lsb),
		RD_REGISTER(err_status.msb),
		RD_REGISTER(linked_list_err_en),
		RD_REGISTER(done_imwr.lsb),
		RD_REGISTER(done_imwr.msb),
		RD_REGISTER(abort_imwr.lsb),
		RD_REGISTER(abort_imwr.msb),
		RD_REGISTER(ch01_imwr_data),
		RD_REGISTER(ch23_imwr_data),
		RD_REGISTER(ch45_imwr_data),
		RD_REGISTER(ch67_imwr_data),
	};
	const struct debugfs_entries debugfs_unroll_regs[] = {
		/* eDMA channel context grouping */
		RD_REGISTER_UNROLL(engine_chgroup),
		RD_REGISTER_UNROLL(engine_hshake_cnt.lsb),
		RD_REGISTER_UNROLL(engine_hshake_cnt.msb),
		RD_REGISTER_UNROLL(ch0_pwr_en),
		RD_REGISTER_UNROLL(ch1_pwr_en),
		RD_REGISTER_UNROLL(ch2_pwr_en),
		RD_REGISTER_UNROLL(ch3_pwr_en),
		RD_REGISTER_UNROLL(ch4_pwr_en),
		RD_REGISTER_UNROLL(ch5_pwr_en),
		RD_REGISTER_UNROLL(ch6_pwr_en),
		RD_REGISTER_UNROLL(ch7_pwr_en),
	};
	struct dentry *regs_dir, *ch_dir;
	int nr_entries, i;
	char name[16];

	regs_dir = debugfs_create_dir(READ_STR, dir);
	if (!regs_dir)
		return;

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);

	if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
					   regs_dir);
	}

	for (i = 0; i < dw->rd_ch_cnt; i++) {
		snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);

		ch_dir = debugfs_create_dir(name, regs_dir);
		if (!ch_dir)
			return;

		dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir);

		lim[1][i].start = &regs->type.unroll.ch[i].rd;
		lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0];
	}
}
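
/* Top-level "registers" directory: global registers plus both directions. */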
static void dw_edma_debugfs_regs(void)
{
	const struct debugfs_entries debugfs_regs[] = {
		REGISTER(ctrl_data_arb_prior),
		REGISTER(ctrl),
	};
	struct dentry *regs_dir;
	int nr_entries;

	regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs);
	if (!regs_dir)
		return;

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);

	dw_edma_debugfs_regs_wr(regs_dir);
	dw_edma_debugfs_regs_rd(regs_dir);
}
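
/*
 * Create the debugfs hierarchy for an eDMA instance: a directory named after
 * the device, holding the map format, the channel counts and the register
 * tree.
 */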
void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
{
	dw = _dw;
	if (!dw)
		return;

	regs = dw->chip->reg_base;
	if (!regs)
		return;

	dw->debugfs = debugfs_create_dir(dw->name, NULL);
	if (!dw->debugfs)
		return;

	debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
	debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
	debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);

	dw_edma_debugfs_regs();
}
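
/* Tear down the whole debugfs tree created by dw_edma_v0_debugfs_on(). */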
void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
{
	dw = _dw;
	if (!dw)
		return;

	debugfs_remove_recursive(dw->debugfs);
	dw->debugfs = NULL;
}