debugfs.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 HiSilicon Limited. */
#include <linux/hisi_acc_qm.h>
#include "qm_common.h"

/* Base offsets and lengths (in registers) of the QM DFX register regions. */
#define QM_DFX_BASE			0x0100000
#define QM_DFX_STATE1			0x0104000
#define QM_DFX_STATE2			0x01040C8
#define QM_DFX_COMMON			0x0000
#define QM_DFX_BASE_LEN			0x5A
#define QM_DFX_STATE1_LEN		0x2E
#define QM_DFX_STATE2_LEN		0x11
#define QM_DFX_COMMON_LEN		0xC3
/* Stride between consecutive 32-bit DFX registers, in bytes. */
#define QM_DFX_REGS_LEN			4UL
#define QM_DBG_TMP_BUF_LEN		22
/* Function-select and queue-select fields within the DFX counter registers. */
#define CURRENT_FUN_MASK		GENMASK(5, 0)
#define CURRENT_Q_MASK			GENMASK(31, 16)
/* Fill byte used to mask DMA addresses out of dumped SQEs. */
#define QM_SQE_ADDR_MASK		GENMASK(7, 0)

/* Per-function DFX counter registers. */
#define QM_DFX_MB_CNT_VF		0x104010
#define QM_DFX_DB_CNT_VF		0x104020
#define QM_DFX_SQE_CNT_VF_SQN		0x104030
#define QM_DFX_CQE_CNT_VF_CQN		0x104040
#define QM_DFX_QN_SHIFT			16
/* Control register enabling read-to-clear behavior of the counters. */
#define QM_DFX_CNT_CLR_CE		0x100118
#define QM_DBG_WRITE_LEN		1024
/* debugfs file names, indexed by enum qm_debug_file. */
static const char * const qm_debug_file_name[] = {
	[CURRENT_QM]   = "current_qm",
	[CURRENT_Q]    = "current_q",
	[CLEAR_ENABLE] = "clear_enable",
};
/* Binds a debugfs file name to a counter's offset within struct qm_dfx. */
struct qm_dfx_item {
	const char *name;	/* debugfs file name */
	u32 offset;		/* offsetof() the atomic64 counter in struct qm_dfx */
};
/* DFX software counters exposed as individual debugfs files. */
static struct qm_dfx_item qm_dfx_files[] = {
	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
};
/* Number of leading read-clear counter registers in qm_dfx_regs[]. */
#define CNT_CYC_REGS_NUM		10

/* PF DFX registers shown by the "regs" debugfs file. */
static const struct debugfs_reg32 qm_dfx_regs[] = {
	/* XXX_CNT are reading clear register */
	{"QM_ECC_1BIT_CNT ", 0x104000ull},
	{"QM_ECC_MBIT_CNT ", 0x104008ull},
	{"QM_DFX_MB_CNT ", 0x104018ull},
	{"QM_DFX_DB_CNT ", 0x104028ull},
	{"QM_DFX_SQE_CNT ", 0x104038ull},
	{"QM_DFX_CQE_CNT ", 0x104048ull},
	{"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
	{"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
	{"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
	{"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
	{"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
	{"QM_ECC_1BIT_INF ", 0x104004ull},
	{"QM_ECC_MBIT_INF ", 0x10400cull},
	{"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
	{"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
	{"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
	{"QM_DFX_FF_ST0 ", 0x1040c8ull},
	{"QM_DFX_FF_ST1 ", 0x1040ccull},
	{"QM_DFX_FF_ST2 ", 0x1040d0ull},
	{"QM_DFX_FF_ST3 ", 0x1040d4ull},
	{"QM_DFX_FF_ST4 ", 0x1040d8ull},
	{"QM_DFX_FF_ST5 ", 0x1040dcull},
	{"QM_DFX_FF_ST6 ", 0x1040e0ull},
	{"QM_IN_IDLE_ST ", 0x1040e4ull},
};
/* VF view of "regs": only the function-active status register. */
static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
	{"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
};
/* define the QM's dfx regs region and region length */
static struct dfx_diff_registers qm_diff_regs[] = {
	{
		.reg_offset = QM_DFX_BASE,
		.reg_len = QM_DFX_BASE_LEN,
	}, {
		.reg_offset = QM_DFX_STATE1,
		.reg_len = QM_DFX_STATE1_LEN,
	}, {
		.reg_offset = QM_DFX_STATE2,
		.reg_len = QM_DFX_STATE2_LEN,
	}, {
		.reg_offset = QM_DFX_COMMON,
		.reg_len = QM_DFX_COMMON_LEN,
	},
};
  88. static struct hisi_qm *file_to_qm(struct debugfs_file *file)
  89. {
  90. struct qm_debug *debug = file->debug;
  91. return container_of(debug, struct hisi_qm, debug);
  92. }
  93. static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
  94. size_t count, loff_t *pos)
  95. {
  96. char buf[QM_DBG_READ_LEN];
  97. int len;
  98. len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
  99. "Please echo help to cmd to get help information");
  100. return simple_read_from_buffer(buffer, count, pos, buf, len);
  101. }
  102. static void dump_show(struct hisi_qm *qm, void *info,
  103. unsigned int info_size, char *info_name)
  104. {
  105. struct device *dev = &qm->pdev->dev;
  106. u8 *info_curr = info;
  107. u32 i;
  108. #define BYTE_PER_DW 4
  109. dev_info(dev, "%s DUMP\n", info_name);
  110. for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
  111. pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
  112. *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
  113. }
  114. }
/*
 * Dump the SQ context (SQC) of qp @s ("sqc <num>" command).
 *
 * The SQC is fetched from the device via a mailbox command.  If the
 * mailbox fails (e.g. the device is resetting), the software copy
 * cached in qm->sqc is dumped instead, when present.
 *
 * Returns 0 unless the input is invalid or the DMA buffer cannot be
 * allocated; a mailbox failure is not propagated as an error.
 */
static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_sqc *sqc, *sqc_curr;
	dma_addr_t sqc_dma;
	u32 qp_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &qp_id);
	if (ret || qp_id >= qm->qp_num) {
		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
		return -EINVAL;
	}

	sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
	if (IS_ERR(sqc))
		return PTR_ERR(sqc);

	/* Ask the device to write the SQC into the DMA buffer. */
	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
	if (ret) {
		/* Mailbox failed: fall back to the cached software SQC. */
		down_read(&qm->qps_lock);
		if (qm->sqc) {
			sqc_curr = qm->sqc + qp_id;
			dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
		}
		up_read(&qm->qps_lock);

		goto free_ctx;
	}

	dump_show(qm, sqc, sizeof(*sqc), "SQC");

free_ctx:
	hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
	return 0;
}
/*
 * Dump the CQ context (CQC) of qp @s ("cqc <num>" command).
 *
 * Mirrors qm_sqc_dump(): fetch via mailbox, falling back to the
 * software copy in qm->cqc when the mailbox fails.
 */
static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_cqc *cqc, *cqc_curr;
	dma_addr_t cqc_dma;
	u32 qp_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &qp_id);
	if (ret || qp_id >= qm->qp_num) {
		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
		return -EINVAL;
	}

	cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
	if (IS_ERR(cqc))
		return PTR_ERR(cqc);

	/* Ask the device to write the CQC into the DMA buffer. */
	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1);
	if (ret) {
		/* Mailbox failed: fall back to the cached software CQC. */
		down_read(&qm->qps_lock);
		if (qm->cqc) {
			cqc_curr = qm->cqc + qp_id;
			dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
		}
		up_read(&qm->qps_lock);

		goto free_ctx;
	}

	dump_show(qm, cqc, sizeof(*cqc), "CQC");

free_ctx:
	hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
	return 0;
}
/*
 * Dump the EQ or AEQ context via mailbox ("eqc"/"aeqc" commands).
 * @s must contain no further tokens; @size, @cmd and @name select
 * which context is fetched and how the dump is labeled.
 */
static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
			    int cmd, char *name)
{
	struct device *dev = &qm->pdev->dev;
	dma_addr_t xeqc_dma;
	void *xeqc;
	int ret;

	/* These commands take no arguments. */
	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma);
	if (IS_ERR(xeqc))
		return PTR_ERR(xeqc);

	/* Ask the device to write the context into the DMA buffer. */
	ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
	if (ret)
		goto err_free_ctx;

	dump_show(qm, xeqc, size, name);

err_free_ctx:
	hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma);
	return ret;
}
  201. static int q_dump_param_parse(struct hisi_qm *qm, char *s,
  202. u32 *e_id, u32 *q_id, u16 q_depth)
  203. {
  204. struct device *dev = &qm->pdev->dev;
  205. unsigned int qp_num = qm->qp_num;
  206. char *presult;
  207. int ret;
  208. presult = strsep(&s, " ");
  209. if (!presult) {
  210. dev_err(dev, "Please input qp number!\n");
  211. return -EINVAL;
  212. }
  213. ret = kstrtou32(presult, 0, q_id);
  214. if (ret || *q_id >= qp_num) {
  215. dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
  216. return -EINVAL;
  217. }
  218. presult = strsep(&s, " ");
  219. if (!presult) {
  220. dev_err(dev, "Please input sqe number!\n");
  221. return -EINVAL;
  222. }
  223. ret = kstrtou32(presult, 0, e_id);
  224. if (ret || *e_id >= q_depth) {
  225. dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
  226. return -EINVAL;
  227. }
  228. if (strsep(&s, " ")) {
  229. dev_err(dev, "Please do not input extra characters!\n");
  230. return -EINVAL;
  231. }
  232. return 0;
  233. }
  234. static int qm_sq_dump(struct hisi_qm *qm, char *s)
  235. {
  236. u16 sq_depth = qm->qp_array->cq_depth;
  237. void *sqe, *sqe_curr;
  238. struct hisi_qp *qp;
  239. u32 qp_id, sqe_id;
  240. int ret;
  241. ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
  242. if (ret)
  243. return ret;
  244. sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
  245. if (!sqe)
  246. return -ENOMEM;
  247. qp = &qm->qp_array[qp_id];
  248. memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
  249. sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
  250. memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
  251. qm->debug.sqe_mask_len);
  252. dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
  253. kfree(sqe);
  254. return 0;
  255. }
  256. static int qm_cq_dump(struct hisi_qm *qm, char *s)
  257. {
  258. struct qm_cqe *cqe_curr;
  259. struct hisi_qp *qp;
  260. u32 qp_id, cqe_id;
  261. int ret;
  262. ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
  263. if (ret)
  264. return ret;
  265. qp = &qm->qp_array[qp_id];
  266. cqe_curr = qp->cqe + cqe_id;
  267. dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
  268. return 0;
  269. }
  270. static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
  271. size_t size, char *name)
  272. {
  273. struct device *dev = &qm->pdev->dev;
  274. void *xeqe;
  275. u32 xeqe_id;
  276. int ret;
  277. if (!s)
  278. return -EINVAL;
  279. ret = kstrtou32(s, 0, &xeqe_id);
  280. if (ret)
  281. return -EINVAL;
  282. if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
  283. dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
  284. return -EINVAL;
  285. } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
  286. dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1);
  287. return -EINVAL;
  288. }
  289. down_read(&qm->qps_lock);
  290. if (qm->eqe && !strcmp(name, "EQE")) {
  291. xeqe = qm->eqe + xeqe_id;
  292. } else if (qm->aeqe && !strcmp(name, "AEQE")) {
  293. xeqe = qm->aeqe + xeqe_id;
  294. } else {
  295. ret = -EINVAL;
  296. goto err_unlock;
  297. }
  298. dump_show(qm, xeqe, size, name);
  299. err_unlock:
  300. up_read(&qm->qps_lock);
  301. return ret;
  302. }
  303. static int qm_dbg_help(struct hisi_qm *qm, char *s)
  304. {
  305. struct device *dev = &qm->pdev->dev;
  306. if (strsep(&s, " ")) {
  307. dev_err(dev, "Please do not input extra characters!\n");
  308. return -EINVAL;
  309. }
  310. dev_info(dev, "available commands:\n");
  311. dev_info(dev, "sqc <num>\n");
  312. dev_info(dev, "cqc <num>\n");
  313. dev_info(dev, "eqc\n");
  314. dev_info(dev, "aeqc\n");
  315. dev_info(dev, "sq <num> <e>\n");
  316. dev_info(dev, "cq <num> <e>\n");
  317. dev_info(dev, "eq <e>\n");
  318. dev_info(dev, "aeq <e>\n");
  319. return 0;
  320. }
/*
 * Parse and dispatch one command written to the "cmd" debugfs file.
 * The first space-separated token selects the dump type; the rest of
 * the string is handed to the per-command parser.
 */
static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
{
	struct device *dev = &qm->pdev->dev;
	char *presult, *s, *s_tmp;
	int ret;

	s = kstrdup(cmd_buf, GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	/* strsep() advances s, so keep the original pointer for kfree(). */
	s_tmp = s;
	presult = strsep(&s, " ");
	if (!presult) {
		ret = -EINVAL;
		goto err_buffer_free;
	}

	if (!strcmp(presult, "sqc"))
		ret = qm_sqc_dump(qm, s);
	else if (!strcmp(presult, "cqc"))
		ret = qm_cqc_dump(qm, s);
	else if (!strcmp(presult, "eqc"))
		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
				       QM_MB_CMD_EQC, "EQC");
	else if (!strcmp(presult, "aeqc"))
		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
				       QM_MB_CMD_AEQC, "AEQC");
	else if (!strcmp(presult, "sq"))
		ret = qm_sq_dump(qm, s);
	else if (!strcmp(presult, "cq"))
		ret = qm_cq_dump(qm, s);
	else if (!strcmp(presult, "eq"))
		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
	else if (!strcmp(presult, "aeq"))
		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
	else if (!strcmp(presult, "help"))
		ret = qm_dbg_help(qm, s);
	else
		ret = -EINVAL;

	if (ret)
		dev_info(dev, "Please echo help\n");

err_buffer_free:
	kfree(s_tmp);

	return ret;
}
  363. static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
  364. size_t count, loff_t *pos)
  365. {
  366. struct hisi_qm *qm = filp->private_data;
  367. char *cmd_buf, *cmd_buf_tmp;
  368. int ret;
  369. if (*pos)
  370. return 0;
  371. ret = hisi_qm_get_dfx_access(qm);
  372. if (ret)
  373. return ret;
  374. /* Judge if the instance is being reset. */
  375. if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
  376. ret = 0;
  377. goto put_dfx_access;
  378. }
  379. if (count > QM_DBG_WRITE_LEN) {
  380. ret = -ENOSPC;
  381. goto put_dfx_access;
  382. }
  383. cmd_buf = memdup_user_nul(buffer, count);
  384. if (IS_ERR(cmd_buf)) {
  385. ret = PTR_ERR(cmd_buf);
  386. goto put_dfx_access;
  387. }
  388. cmd_buf_tmp = strchr(cmd_buf, '\n');
  389. if (cmd_buf_tmp) {
  390. *cmd_buf_tmp = '\0';
  391. count = cmd_buf_tmp - cmd_buf + 1;
  392. }
  393. ret = qm_cmd_write_dump(qm, cmd_buf);
  394. if (ret) {
  395. kfree(cmd_buf);
  396. goto put_dfx_access;
  397. }
  398. kfree(cmd_buf);
  399. ret = count;
  400. put_dfx_access:
  401. hisi_qm_put_dfx_access(qm);
  402. return ret;
  403. }
/* File operations for the "cmd" debugfs file (context dump commands). */
static const struct file_operations qm_cmd_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_cmd_read,
	.write = qm_cmd_write,
};
  410. /**
  411. * hisi_qm_regs_dump() - Dump registers's value.
  412. * @s: debugfs file handle.
  413. * @regset: accelerator registers information.
  414. *
  415. * Dump accelerator registers.
  416. */
  417. void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
  418. {
  419. struct pci_dev *pdev = to_pci_dev(regset->dev);
  420. struct hisi_qm *qm = pci_get_drvdata(pdev);
  421. const struct debugfs_reg32 *regs = regset->regs;
  422. int regs_len = regset->nregs;
  423. int i, ret;
  424. u32 val;
  425. ret = hisi_qm_get_dfx_access(qm);
  426. if (ret)
  427. return;
  428. for (i = 0; i < regs_len; i++) {
  429. val = readl(regset->base + regs[i].offset);
  430. seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
  431. }
  432. hisi_qm_put_dfx_access(qm);
  433. }
  434. EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
  435. static int qm_regs_show(struct seq_file *s, void *unused)
  436. {
  437. struct hisi_qm *qm = s->private;
  438. struct debugfs_regset32 regset;
  439. if (qm->fun_type == QM_HW_PF) {
  440. regset.regs = qm_dfx_regs;
  441. regset.nregs = ARRAY_SIZE(qm_dfx_regs);
  442. } else {
  443. regset.regs = qm_vf_dfx_regs;
  444. regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
  445. }
  446. regset.base = qm->io_base;
  447. regset.dev = &qm->pdev->dev;
  448. hisi_qm_regs_dump(s, &regset);
  449. return 0;
  450. }
  451. DEFINE_SHOW_ATTRIBUTE(qm_regs);
/* Read the queue index currently selected in the SQE counter register. */
static u32 current_q_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
}
/*
 * Select queue @val in both the SQE and CQE per-VF counter registers,
 * preserving the function-select field (read-modify-write).
 */
static int current_q_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	/* curr_qm_qp_num was cached by the last current_qm_write(). */
	if (val >= qm->debug.curr_qm_qp_num)
		return -EINVAL;

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}
/* Read the current read-clear control setting. */
static u32 clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
}
/* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */
static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
{
	if (rd_clr_ctrl > 1)
		return -EINVAL;

	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);

	return 0;
}
/* Read the function index currently selected in the MB counter register. */
static u32 current_qm_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}
/*
 * Compute the number of queues assigned to VF @fun_num (1-based).
 * Leftover queues from an even split go to the last VF(s).
 * NOTE(review): assumes qm->vfs_num != 0 — callers reach here only
 * with 1 <= fun_num <= qm->vfs_num; confirm against current_qm_write().
 */
static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
{
	u32 remain_q_num, vfq_num;
	u32 num_vfs = qm->vfs_num;

	vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
	/* An even split already saturates every VF's queue limit. */
	if (vfq_num >= qm->max_qp_num)
		return qm->max_qp_num;

	remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
	/* All leftovers fit on the last VF without exceeding the limit. */
	if (vfq_num + remain_q_num <= qm->max_qp_num)
		return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;

	/*
	 * if vfq_num + remain_q_num > max_qp_num, the last VFs,
	 * each with one more queue.
	 */
	return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
}
/*
 * Select function @val (0 = PF, 1..vfs_num = VF) in all four per-VF
 * DFX counter registers, and cache that function's qp count so
 * current_q_write() can range-check queue selections.
 */
static int current_qm_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val > qm->vfs_num)
		return -EINVAL;

	/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
	if (!val)
		qm->debug.curr_qm_qp_num = qm->qp_num;
	else
		qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	/* Keep the queue-select field, replace the function-select field. */
	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}
/*
 * Read handler shared by the current_qm/current_q/clear_enable files.
 * Reads the register selected by file->index under file->lock and
 * returns its decimal value to userspace.
 */
static ssize_t qm_debug_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[QM_DBG_TMP_BUF_LEN];
	u32 val;
	int ret;

	/* Ensure the device stays accessible while touching its registers. */
	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_QM:
		val = current_qm_read(qm);
		break;
	case CURRENT_Q:
		val = current_q_read(qm);
		break;
	case CLEAR_ENABLE:
		val = clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}
	mutex_unlock(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	mutex_unlock(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}
/*
 * Write handler shared by the current_qm/current_q/clear_enable files.
 * Parses an unsigned integer from userspace and applies it to the
 * register selected by file->index, serialized by file->lock.
 */
static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	struct hisi_qm *qm = file_to_qm(file);
	unsigned long val;
	char tbuf[QM_DBG_TMP_BUF_LEN];
	int len, ret;

	/* Only complete writes starting at offset 0 are supported. */
	if (*pos != 0)
		return 0;

	if (count >= QM_DBG_TMP_BUF_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
				     count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_QM:
		ret = current_qm_write(qm, val);
		break;
	case CURRENT_Q:
		ret = current_q_write(qm, val);
		break;
	case CLEAR_ENABLE:
		ret = clear_enable_write(qm, val);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->lock);

	hisi_qm_put_dfx_access(qm);
	if (ret)
		return ret;

	return count;
}
/* File operations for current_qm/current_q/clear_enable debugfs files. */
static const struct file_operations qm_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_debug_read,
	.write = qm_debug_write,
};
  605. static void dfx_regs_uninit(struct hisi_qm *qm,
  606. struct dfx_diff_registers *dregs, int reg_len)
  607. {
  608. int i;
  609. /* Setting the pointer is NULL to prevent double free */
  610. for (i = 0; i < reg_len; i++) {
  611. kfree(dregs[i].regs);
  612. dregs[i].regs = NULL;
  613. }
  614. kfree(dregs);
  615. }
/*
 * Allocate a diff-regs array mirroring @cregs and snapshot the current
 * value of every register in each region.  Regions with reg_len == 0
 * are kept in place but left empty.  Returns the array or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
	const struct dfx_diff_registers *cregs, u32 reg_len)
{
	struct dfx_diff_registers *diff_regs;
	u32 j, base_offset;
	int i;

	diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
	if (!diff_regs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < reg_len; i++) {
		if (!cregs[i].reg_len)
			continue;

		diff_regs[i].reg_offset = cregs[i].reg_offset;
		diff_regs[i].reg_len = cregs[i].reg_len;
		/* QM_DFX_REGS_LEN (4) bytes of storage per 32-bit register. */
		diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
					    GFP_KERNEL);
		if (!diff_regs[i].regs)
			goto alloc_error;

		/* Snapshot the current value of every register in the region. */
		for (j = 0; j < diff_regs[i].reg_len; j++) {
			base_offset = diff_regs[i].reg_offset +
				      j * QM_DFX_REGS_LEN;
			diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
		}
	}

	return diff_regs;

alloc_error:
	/* Free regions allocated so far; entry i itself is NULL. */
	while (i > 0) {
		i--;
		kfree(diff_regs[i].regs);
	}
	kfree(diff_regs);
	return ERR_PTR(-ENOMEM);
}
  649. static int qm_diff_regs_init(struct hisi_qm *qm,
  650. struct dfx_diff_registers *dregs, u32 reg_len)
  651. {
  652. qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
  653. if (IS_ERR(qm->debug.qm_diff_regs))
  654. return PTR_ERR(qm->debug.qm_diff_regs);
  655. qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
  656. if (IS_ERR(qm->debug.acc_diff_regs)) {
  657. dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
  658. return PTR_ERR(qm->debug.acc_diff_regs);
  659. }
  660. return 0;
  661. }
  662. static void qm_last_regs_uninit(struct hisi_qm *qm)
  663. {
  664. struct qm_debug *debug = &qm->debug;
  665. if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
  666. return;
  667. kfree(debug->qm_last_words);
  668. debug->qm_last_words = NULL;
  669. }
  670. static int qm_last_regs_init(struct hisi_qm *qm)
  671. {
  672. int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
  673. struct qm_debug *debug = &qm->debug;
  674. int i;
  675. if (qm->fun_type == QM_HW_VF)
  676. return 0;
  677. debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
  678. if (!debug->qm_last_words)
  679. return -ENOMEM;
  680. for (i = 0; i < dfx_regs_num; i++) {
  681. debug->qm_last_words[i] = readl_relaxed(qm->io_base +
  682. qm_dfx_regs[i].offset);
  683. }
  684. return 0;
  685. }
/* Free both the accelerator and the QM diff register snapshots. */
static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len)
{
	dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
	dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
}
/**
 * hisi_qm_regs_debugfs_init() - Allocate memory for registers.
 * @qm: device qm handle.
 * @dregs: diff registers handle.
 * @reg_len: diff registers region length.
 */
int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
		struct dfx_diff_registers *dregs, u32 reg_len)
{
	int ret;

	if (!qm || !dregs)
		return -EINVAL;

	/* Register snapshots are only maintained for the PF. */
	if (qm->fun_type != QM_HW_PF)
		return 0;

	ret = qm_last_regs_init(qm);
	if (ret) {
		dev_info(&qm->pdev->dev, "failed to init qm words memory!\n");
		return ret;
	}

	ret = qm_diff_regs_init(qm, dregs, reg_len);
	if (ret) {
		/* Roll back the last-words snapshot on failure. */
		qm_last_regs_uninit(qm);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_init);
/**
 * hisi_qm_regs_debugfs_uninit() - Free memory for registers.
 * @qm: device qm handle.
 * @reg_len: diff registers region length.
 */
void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len)
{
	/* Nothing was allocated for VFs; see hisi_qm_regs_debugfs_init(). */
	if (!qm || qm->fun_type != QM_HW_PF)
		return;

	qm_diff_regs_uninit(qm, reg_len);
	qm_last_regs_uninit(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_uninit);
/**
 * hisi_qm_acc_diff_regs_dump() - Dump registers's value.
 * @qm: device qm handle.
 * @s: Debugfs file handle.
 * @dregs: diff registers handle.
 * @regs_len: diff registers region length.
 */
void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
	struct dfx_diff_registers *dregs, u32 regs_len)
{
	u32 j, val, base_offset;
	int i, ret;

	if (!qm || !s || !dregs)
		return;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return;

	down_read(&qm->qps_lock);
	for (i = 0; i < regs_len; i++) {
		/* Zero-length regions were skipped at init time. */
		if (!dregs[i].reg_len)
			continue;

		for (j = 0; j < dregs[i].reg_len; j++) {
			base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
			val = readl(qm->io_base + base_offset);
			/* Only report registers that changed since init. */
			if (val != dregs[i].regs[j])
				seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
					   base_offset, dregs[i].regs[j], val);
		}
	}
	up_read(&qm->qps_lock);

	hisi_qm_put_dfx_access(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
/* Log every PF DFX register whose value changed since the init snapshot. */
void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	u32 val;
	int i;

	/* VFs never take a snapshot; the PF may not have one either. */
	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
		return;

	for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
		if (debug->qm_last_words[i] != val)
			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
				 qm_dfx_regs[i].name, debug->qm_last_words[i], val);
	}
}
/* seq_file show: report QM registers that changed since the snapshot. */
static int qm_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
				   ARRAY_SIZE(qm_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
/*
 * Read handler for the "status" debugfs file: prints the qm state
 * string for the current status flag value.
 */
static ssize_t qm_status_read(struct file *filp, char __user *buffer,
			      size_t count, loff_t *pos)
{
	struct hisi_qm *qm = filp->private_data;
	char buf[QM_DBG_READ_LEN];
	int val, len;

	val = atomic_read(&qm->status.flags);
	/* NOTE(review): assumes flags is always a valid qm_s[] index — confirm. */
	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);

	return simple_read_from_buffer(buffer, count, pos, buf, len);
}
/* File operations for the read-only "status" debugfs file. */
static const struct file_operations qm_status_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_status_read,
};
  802. static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
  803. enum qm_debug_file index)
  804. {
  805. struct debugfs_file *file = qm->debug.files + index;
  806. debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
  807. &qm_debug_fops);
  808. file->index = index;
  809. mutex_init(&file->lock);
  810. file->debug = &qm->debug;
  811. }
/* debugfs setter for DFX counters: only writing 0 (reset) is allowed. */
static int qm_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}
/* debugfs getter for DFX counters. */
static int qm_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
			 qm_debugfs_atomic64_set, "%llu\n");
/**
 * hisi_qm_debug_init() - Initialize qm related debugfs files.
 * @qm: The qm for which we want to add debugfs files.
 *
 * Create qm related debugfs files.
 */
void hisi_qm_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
	struct qm_dfx *dfx = &qm->debug.dfx;
	struct dentry *qm_d;
	void *data;
	int i;

	qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
	qm->debug.qm_d = qm_d;

	/* only show this in PF */
	if (qm->fun_type == QM_HW_PF) {
		/* current_qm lives in the device root; the rest under qm/. */
		qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
		for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
			qm_create_debugfs_file(qm, qm->debug.qm_d, i);
	}

	/* Only present when diff-regs snapshots were allocated. */
	if (qm_regs)
		debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
				    qm, &qm_diff_regs_fops);

	debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);

	debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);

	debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
			    &qm_status_fops);

	/* One debugfs file per DFX software counter in struct qm_dfx. */
	for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
		data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
		debugfs_create_file(qm_dfx_files[i].name,
				    0644,
				    qm_d,
				    data,
				    &qm_atomic64_ops);
	}

	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		hisi_qm_set_algqos_init(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
  866. /**
  867. * hisi_qm_debug_regs_clear() - clear qm debug related registers.
  868. * @qm: The qm for which we want to clear its debug registers.
  869. */
  870. void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
  871. {
  872. const struct debugfs_reg32 *regs;
  873. int i;
  874. /* clear current_qm */
  875. writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
  876. writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
  877. /* clear current_q */
  878. writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
  879. writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
  880. /*
  881. * these registers are reading and clearing, so clear them after
  882. * reading them.
  883. */
  884. writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
  885. regs = qm_dfx_regs;
  886. for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
  887. readl(qm->io_base + regs->offset);
  888. regs++;
  889. }
  890. /* clear clear_enable */
  891. writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
  892. }
  893. EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);