zip_main.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2019 HiSilicon Limited. */
  3. #include <linux/acpi.h>
  4. #include <linux/aer.h>
  5. #include <linux/bitops.h>
  6. #include <linux/debugfs.h>
  7. #include <linux/init.h>
  8. #include <linux/io.h>
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/pci.h>
  12. #include <linux/pm_runtime.h>
  13. #include <linux/seq_file.h>
  14. #include <linux/topology.h>
  15. #include <linux/uacce.h>
  16. #include "zip.h"
  17. #define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250
  18. #define HZIP_QUEUE_NUM_V1 4096
  19. #define HZIP_CLOCK_GATE_CTRL 0x301004
  20. #define HZIP_DECOMP_CHECK_ENABLE BIT(16)
  21. #define HZIP_FSM_MAX_CNT 0x301008
  22. #define HZIP_PORT_ARCA_CHE_0 0x301040
  23. #define HZIP_PORT_ARCA_CHE_1 0x301044
  24. #define HZIP_PORT_AWCA_CHE_0 0x301060
  25. #define HZIP_PORT_AWCA_CHE_1 0x301064
  26. #define HZIP_CACHE_ALL_EN 0xffffffff
  27. #define HZIP_BD_RUSER_32_63 0x301110
  28. #define HZIP_SGL_RUSER_32_63 0x30111c
  29. #define HZIP_DATA_RUSER_32_63 0x301128
  30. #define HZIP_DATA_WUSER_32_63 0x301134
  31. #define HZIP_BD_WUSER_32_63 0x301140
  32. #define HZIP_QM_IDEL_STATUS 0x3040e4
  33. #define HZIP_CORE_DFX_BASE 0x301000
  34. #define HZIP_CLOCK_GATED_CONTL 0X301004
  35. #define HZIP_CORE_DFX_COMP_0 0x302000
  36. #define HZIP_CORE_DFX_COMP_1 0x303000
  37. #define HZIP_CORE_DFX_DECOMP_0 0x304000
  38. #define HZIP_CORE_DFX_DECOMP_1 0x305000
  39. #define HZIP_CORE_DFX_DECOMP_2 0x306000
  40. #define HZIP_CORE_DFX_DECOMP_3 0x307000
  41. #define HZIP_CORE_DFX_DECOMP_4 0x308000
  42. #define HZIP_CORE_DFX_DECOMP_5 0x309000
  43. #define HZIP_CORE_REGS_BASE_LEN 0xB0
  44. #define HZIP_CORE_REGS_DFX_LEN 0x28
  45. #define HZIP_CORE_INT_SOURCE 0x3010A0
  46. #define HZIP_CORE_INT_MASK_REG 0x3010A4
  47. #define HZIP_CORE_INT_SET 0x3010A8
  48. #define HZIP_CORE_INT_STATUS 0x3010AC
  49. #define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
  50. #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
  51. #define HZIP_CORE_INT_RAS_CE_ENB 0x301160
  52. #define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
  53. #define HZIP_CORE_INT_RAS_FE_ENB 0x301168
  54. #define HZIP_CORE_INT_RAS_FE_ENB_MASK 0x0
  55. #define HZIP_OOO_SHUTDOWN_SEL 0x30120C
  56. #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
  57. #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
  58. #define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
  59. #define HZIP_SQE_SIZE 128
  60. #define HZIP_PF_DEF_Q_NUM 64
  61. #define HZIP_PF_DEF_Q_BASE 0
  62. #define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
  63. #define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
  64. #define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
  65. #define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
  66. #define HZIP_WR_PORT BIT(11)
  67. #define HZIP_DEV_ALG_MAX_LEN 256
  68. #define HZIP_ALG_ZLIB_BIT GENMASK(1, 0)
  69. #define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
  70. #define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
  71. #define HZIP_ALG_LZ77_BIT GENMASK(7, 6)
  72. #define HZIP_BUF_SIZE 22
  73. #define HZIP_SQE_MASK_OFFSET 64
  74. #define HZIP_SQE_MASK_LEN 48
  75. #define HZIP_CNT_CLR_CE_EN BIT(0)
  76. #define HZIP_RO_CNT_CLR_CE_EN BIT(2)
  77. #define HZIP_RD_CNT_CLR_CE_EN (HZIP_CNT_CLR_CE_EN | \
  78. HZIP_RO_CNT_CLR_CE_EN)
  79. #define HZIP_PREFETCH_CFG 0x3011B0
  80. #define HZIP_SVA_TRANS 0x3011C4
  81. #define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
  82. #define HZIP_SVA_PREFETCH_DISABLE BIT(26)
  83. #define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
  84. #define HZIP_SHAPER_RATE_COMPRESS 750
  85. #define HZIP_SHAPER_RATE_DECOMPRESS 140
  86. #define HZIP_DELAY_1_US 1
  87. #define HZIP_POLL_TIMEOUT_US 1000
  88. /* clock gating */
  89. #define HZIP_PEH_CFG_AUTO_GATE 0x3011A8
  90. #define HZIP_PEH_CFG_AUTO_GATE_EN BIT(0)
  91. #define HZIP_CORE_GATED_EN GENMASK(15, 8)
  92. #define HZIP_CORE_GATED_OOO_EN BIT(29)
  93. #define HZIP_CLOCK_GATED_EN (HZIP_CORE_GATED_EN | \
  94. HZIP_CORE_GATED_OOO_EN)
static const char hisi_zip_name[] = "hisi_zip";
/* Module-wide debugfs root ("hisi_zip"); per-device dirs are created under it. */
static struct dentry *hzip_debugfs_root;

/* Maps one hardware-error interrupt bit to a printable description. */
struct hisi_zip_hw_error {
	u32 int_msk;
	const char *msg;
};

/* Name and byte offset (into struct hisi_zip_dfx) of one debugfs counter. */
struct zip_dfx_item {
	const char *name;
	u32 offset;
};

/* Associates a device-capability bitmask with the algorithm names it enables. */
struct zip_dev_alg {
	u32 alg_msk;
	const char *algs;
};
/*
 * Algorithm names advertised to uacce, selected by the device's
 * ZIP_DEV_ALG_BITMAP capability. Each string ends in '\n' so matched
 * entries can be concatenated; the final '\n' is stripped in
 * hisi_zip_set_qm_algs().
 */
static const struct zip_dev_alg zip_dev_algs[] = { {
		.alg_msk = HZIP_ALG_ZLIB_BIT,
		.algs = "zlib\n",
	}, {
		.alg_msk = HZIP_ALG_GZIP_BIT,
		.algs = "gzip\n",
	}, {
		.alg_msk = HZIP_ALG_DEFLATE_BIT,
		.algs = "deflate\n",
	}, {
		.alg_msk = HZIP_ALG_LZ77_BIT,
		.algs = "lz77_zstd\n",
	},
};

/* List of all ZIP devices; registration callbacks hook into the crypto API. */
static struct hisi_qm_list zip_devices = {
	.register_to_crypto = hisi_zip_register_to_crypto,
	.unregister_from_crypto = hisi_zip_unregister_from_crypto,
};
/* Software counters exposed under debugfs "zip_dfx/"; offsets into struct hisi_zip_dfx. */
static struct zip_dfx_item zip_dfx_files[] = {
	{"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)},
	{"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)},
};
  133. static const struct hisi_zip_hw_error zip_hw_error[] = {
  134. { .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" },
  135. { .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
  136. { .int_msk = BIT(2), .msg = "zip_axi_rresp_err" },
  137. { .int_msk = BIT(3), .msg = "zip_axi_bresp_err" },
  138. { .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" },
  139. { .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" },
  140. { .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" },
  141. { .int_msk = BIT(7), .msg = "zip_pre_in_data_err" },
  142. { .int_msk = BIT(8), .msg = "zip_com_inf_err" },
  143. { .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
  144. { .int_msk = BIT(10), .msg = "zip_pre_out_err" },
  145. { .int_msk = BIT(11), .msg = "zip_axi_poison_err" },
  146. { .int_msk = BIT(12), .msg = "zip_sva_err" },
  147. { /* sentinel */ }
  148. };
/* Indexes of the per-PF ctrl debugfs files. */
enum ctrl_debug_file_index {
	HZIP_CLEAR_ENABLE,
	HZIP_DEBUG_FILE_NUM,
};

/* debugfs file name for each ctrl_debug_file_index entry. */
static const char * const ctrl_debug_file_name[] = {
	[HZIP_CLEAR_ENABLE] = "clear_enable",
};

/* State behind one ctrl debugfs file: its index, a lock, and its owner. */
struct ctrl_debug_file {
	enum ctrl_debug_file_index index;
	spinlock_t lock;
	struct hisi_zip_ctrl *ctrl;
};

/*
 * One ZIP controller has one PF and multiple VFs. This structure holds the
 * global configuration state that only the PF needs.
 *
 * Only relevant for the PF.
 */
struct hisi_zip_ctrl {
	struct hisi_zip *hisi_zip;
	struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
/*
 * Indexes into zip_basic_cap_info[]; each value names one hardware
 * capability field readable via hisi_qm_get_hw_info().
 */
enum zip_cap_type {
	ZIP_QM_NFE_MASK_CAP = 0x0,
	ZIP_QM_RESET_MASK_CAP,
	ZIP_QM_OOO_SHUTDOWN_MASK_CAP,
	ZIP_QM_CE_MASK_CAP,
	ZIP_NFE_MASK_CAP,
	ZIP_RESET_MASK_CAP,
	ZIP_OOO_SHUTDOWN_MASK_CAP,
	ZIP_CE_MASK_CAP,
	ZIP_CLUSTER_NUM_CAP,
	ZIP_CORE_TYPE_NUM_CAP,
	ZIP_CORE_NUM_CAP,
	ZIP_CLUSTER_COMP_NUM_CAP,
	ZIP_CLUSTER_DECOMP_NUM_CAP,
	ZIP_DECOMP_ENABLE_BITMAP,
	ZIP_COMP_ENABLE_BITMAP,
	ZIP_DRV_ALG_BITMAP,
	ZIP_DEV_ALG_BITMAP,
	ZIP_CORE1_ALG_BITMAP,
	ZIP_CORE2_ALG_BITMAP,
	ZIP_CORE3_ALG_BITMAP,
	ZIP_CORE4_ALG_BITMAP,
	ZIP_CORE5_ALG_BITMAP,
	ZIP_CAP_MAX
};
/*
 * Capability table consumed by hisi_qm_get_hw_info().
 * NOTE(review): fields assumed to be {type, reg offset, shift, mask,
 * per-HW-version default values} — confirm against struct hisi_qm_cap_info.
 */
static struct hisi_qm_cap_info zip_basic_cap_info[] = {
	{ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77},
	{ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77},
	{ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
	{ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE},
	{ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE},
	{ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE},
	{ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
	{ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1},
	{ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2},
	{ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5},
	{ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2},
	{ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3},
	{ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C},
	{ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3},
	{ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF},
	{ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF},
	{ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
	{ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
	{ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
	{ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
	{ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
	{ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
};
/* Core indexes: two compression cores followed by six decompression cores. */
enum {
	HZIP_COMP_CORE0,
	HZIP_COMP_CORE1,
	HZIP_DECOMP_CORE0,
	HZIP_DECOMP_CORE1,
	HZIP_DECOMP_CORE2,
	HZIP_DECOMP_CORE3,
	HZIP_DECOMP_CORE4,
	HZIP_DECOMP_CORE5,
};

/* MMIO base offset of each core's DFX register window (0x1000 apart). */
static const u64 core_offsets[] = {
	[HZIP_COMP_CORE0] = 0x302000,
	[HZIP_COMP_CORE1] = 0x303000,
	[HZIP_DECOMP_CORE0] = 0x304000,
	[HZIP_DECOMP_CORE1] = 0x305000,
	[HZIP_DECOMP_CORE2] = 0x306000,
	[HZIP_DECOMP_CORE3] = 0x307000,
	[HZIP_DECOMP_CORE4] = 0x308000,
	[HZIP_DECOMP_CORE5] = 0x309000,
};
/* Per-core DFX registers; offsets are relative to the core's window base. */
static const struct debugfs_reg32 hzip_dfx_regs[] = {
	{"HZIP_GET_BD_NUM                ", 0x00ull},
	{"HZIP_GET_RIGHT_BD              ", 0x04ull},
	{"HZIP_GET_ERROR_BD              ", 0x08ull},
	{"HZIP_DONE_BD_NUM               ", 0x0cull},
	{"HZIP_WORK_CYCLE                ", 0x10ull},
	{"HZIP_IDLE_CYCLE                ", 0x18ull},
	{"HZIP_MAX_DELAY                 ", 0x20ull},
	{"HZIP_MIN_DELAY                 ", 0x24ull},
	{"HZIP_AVG_DELAY                 ", 0x28ull},
	{"HZIP_MEM_VISIBLE_DATA          ", 0x30ull},
	{"HZIP_MEM_VISIBLE_ADDR          ", 0x34ull},
	{"HZIP_CONSUMED_BYTE             ", 0x38ull},
	{"HZIP_PRODUCED_BYTE             ", 0x40ull},
	{"HZIP_COMP_INF                  ", 0x70ull},
	{"HZIP_PRE_OUT                   ", 0x78ull},
	{"HZIP_BD_RD                     ", 0x7cull},
	{"HZIP_BD_WR                     ", 0x80ull},
	{"HZIP_GET_BD_AXI_ERR_NUM        ", 0x84ull},
	{"HZIP_GET_BD_PARSE_ERR_NUM      ", 0x88ull},
	{"HZIP_ADD_BD_AXI_ERR_NUM        ", 0x8cull},
	{"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull},
	{"HZIP_DECOMP_LZ77_CURR_ST       ", 0x9cull},
};

/* Common control/RAS registers, dumped with absolute MMIO offsets. */
static const struct debugfs_reg32 hzip_com_dfx_regs[] = {
	{"HZIP_CLOCK_GATE_CTRL           ", 0x301004},
	{"HZIP_CORE_INT_RAS_CE_ENB       ", 0x301160},
	{"HZIP_CORE_INT_RAS_NFE_ENB      ", 0x301164},
	{"HZIP_CORE_INT_RAS_FE_ENB       ", 0x301168},
	{"HZIP_UNCOM_ERR_RAS_CTRL        ", 0x30116C},
};

/* Subset of the per-core DFX registers captured for the last-regs dump. */
static const struct debugfs_reg32 hzip_dump_dfx_regs[] = {
	{"HZIP_GET_BD_NUM                ", 0x00ull},
	{"HZIP_GET_RIGHT_BD              ", 0x04ull},
	{"HZIP_GET_ERROR_BD              ", 0x08ull},
	{"HZIP_DONE_BD_NUM               ", 0x0cull},
	{"HZIP_MAX_DELAY                 ", 0x20ull},
};
/* define the ZIP's dfx regs region and region length */
static struct dfx_diff_registers hzip_diff_regs[] = {
	{
		.reg_offset = HZIP_CORE_DFX_BASE,
		.reg_len = HZIP_CORE_REGS_BASE_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_COMP_0,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_COMP_1,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_0,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_1,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_2,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_3,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_4,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	}, {
		.reg_offset = HZIP_CORE_DFX_DECOMP_5,
		.reg_len = HZIP_CORE_REGS_DFX_LEN,
	},
};
/* Show callback for the PF-only "diff_regs" debugfs file. */
static int hzip_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(hzip_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs);
static const struct kernel_param_ops zip_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means zip only register to crypto,
 * uacce_mode = 1 means zip both register to crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
/* Records whether the pf_q_num module parameter was explicitly supplied. */
static bool pf_q_num_flag;

/* Setter for pf_q_num: remember it was set, then validate via q_num_set(). */
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF);
}

static const struct kernel_param_ops pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");
static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

/* Number of VFs to enable at probe; 0 leaves SR-IOV disabled. */
static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

/* PCI IDs this driver binds to: the ZIP PF and its VFs. */
static const struct pci_device_id hisi_zip_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
  355. int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
  356. {
  357. if (node == NUMA_NO_NODE)
  358. node = cpu_to_node(smp_processor_id());
  359. return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
  360. }
  361. bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
  362. {
  363. u32 cap_val;
  364. cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver);
  365. if ((alg & cap_val) == alg)
  366. return true;
  367. return false;
  368. }
  369. static int hisi_zip_set_qm_algs(struct hisi_qm *qm)
  370. {
  371. struct device *dev = &qm->pdev->dev;
  372. char *algs, *ptr;
  373. u32 alg_mask;
  374. int i;
  375. if (!qm->use_sva)
  376. return 0;
  377. algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
  378. if (!algs)
  379. return -ENOMEM;
  380. alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver);
  381. for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++)
  382. if (alg_mask & zip_dev_algs[i].alg_msk)
  383. strcat(algs, zip_dev_algs[i].algs);
  384. ptr = strrchr(algs, '\n');
  385. if (ptr)
  386. *ptr = '\0';
  387. qm->uacce->algs = algs;
  388. return 0;
  389. }
/* Enable SVA address prefetch and wait for the hardware to acknowledge it. */
static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
	val &= HZIP_PREFETCH_ENABLE;
	writel(val, qm->io_base + HZIP_PREFETCH_CFG);

	/* Poll until the disable bit reads back clear, or time out. */
	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
					 val, !(val & HZIP_SVA_PREFETCH_DISABLE),
					 HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}
/* Disable SVA address prefetch and wait until the hardware reports ready. */
static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Set the disable bit in the prefetch configuration. */
	val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
	val |= HZIP_SVA_PREFETCH_DISABLE;
	writel(val, qm->io_base + HZIP_PREFETCH_CFG);

	/* Wait for the transaction-status register to confirm the disable. */
	ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
					 val, !(val & HZIP_SVA_DISABLE_READY),
					 HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}
  421. static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
  422. {
  423. u32 val;
  424. if (qm->ver < QM_HW_V3)
  425. return;
  426. val = readl(qm->io_base + HZIP_CLOCK_GATE_CTRL);
  427. val |= HZIP_CLOCK_GATED_EN;
  428. writel(val, qm->io_base + HZIP_CLOCK_GATE_CTRL);
  429. val = readl(qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
  430. val |= HZIP_PEH_CFG_AUTO_GATE_EN;
  431. writel(val, qm->io_base + HZIP_PEH_CFG_AUTO_GATE);
  432. }
/*
 * Program the QM user domain, caches, AXI users, and core-enable bits.
 * This is the one-shot register bring-up sequence run on the PF.
 */
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
	void __iomem *base = qm->io_base;
	u32 dcomp_bm, comp_bm;

	/* qm user domain */
	writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);

	/* cache */
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
	writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);

	/* user domain configurations */
	writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);

	/* On HW v2 with SVA, data/SGL accesses carry the SSV flag. */
	if (qm->use_sva && qm->ver == QM_HW_V2) {
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_SGL_RUSER_32_63);
	} else {
		writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
		writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
	}

	/* let's open all compression/decompression cores */
	dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
				       ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver);
	comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
				      ZIP_COMP_ENABLE_BITMAP, qm->cap_ver);
	writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);

	/* enable sqc,cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);

	hisi_zip_enable_clock_gate(qm);

	return 0;
}
/*
 * Enable/disable AXI shutdown on error and, on HW v3+, the out-of-order
 * shutdown mask. The OOO select register is written before the control
 * register to keep the original programming order.
 */
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
	if (enable) {
		val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
		val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
					   ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	} else {
		val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
		val2 = 0x0;
	}

	/* The OOO shutdown select register only exists on HW v3 and later. */
	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
/* Clear stale error sources, program RAS routing, and unmask error IRQs. */
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
	u32 nfe, ce;

	/* HW v1 has no error reporting; keep everything masked. */
	if (qm->ver == QM_HW_V1) {
		writel(HZIP_CORE_INT_MASK_ALL,
		       qm->io_base + HZIP_CORE_INT_MASK_REG);
		dev_info(&qm->pdev->dev, "Does not support hw error handle\n");
		return;
	}

	nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
	ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);

	/* clear ZIP hw error source if having */
	writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);

	/* configure error type */
	writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
	writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
	writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	hisi_zip_master_ooo_ctrl(qm, true);

	/* enable ZIP hw error interrupts */
	writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
}
/* Mask all configured error interrupts and drop the OOO/AXI shutdown setup. */
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
	u32 nfe, ce;

	/* disable ZIP hw error interrupts */
	nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
	ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
	writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);

	hisi_zip_master_ooo_ctrl(qm, false);
}
  525. static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
  526. {
  527. struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;
  528. return &hisi_zip->qm;
  529. }
  530. static u32 clear_enable_read(struct hisi_qm *qm)
  531. {
  532. return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
  533. HZIP_SOFT_CTRL_CNT_CLR_CE_BIT;
  534. }
  535. static int clear_enable_write(struct hisi_qm *qm, u32 val)
  536. {
  537. u32 tmp;
  538. if (val != 1 && val != 0)
  539. return -EINVAL;
  540. tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
  541. ~HZIP_SOFT_CTRL_CNT_CLR_CE_BIT) | val;
  542. writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
  543. return 0;
  544. }
/*
 * debugfs read handler for the ctrl files: read the selected register
 * field under the file lock and copy it to userspace as decimal text.
 */
static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
					size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[HZIP_BUF_SIZE];
	u32 val;
	int ret;

	/* Acquire DFX access before touching device registers. */
	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CLEAR_ENABLE:
		val = clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	/* Unknown file index: drop the lock and DFX access before failing. */
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}
/*
 * debugfs write handler for the ctrl files: parse a number from userspace
 * and apply it to the selected register field under the file lock.
 */
static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
					 const char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[HZIP_BUF_SIZE];
	unsigned long val;
	int len, ret;

	/* Only whole writes from offset 0 are supported. */
	if (*pos != 0)
		return 0;

	if (count >= HZIP_BUF_SIZE)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	ret = kstrtoul(tbuf, 0, &val);
	if (ret)
		return ret;

	/* Acquire DFX access before touching device registers. */
	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CLEAR_ENABLE:
		ret = clear_enable_write(qm, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	/* Success: report the whole buffer as consumed. */
	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}
/* File operations for the per-PF ctrl debugfs files (e.g. clear_enable). */
static const struct file_operations ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hisi_zip_ctrl_debug_read,
	.write = hisi_zip_ctrl_debug_write,
};
  619. static int zip_debugfs_atomic64_set(void *data, u64 val)
  620. {
  621. if (val)
  622. return -EINVAL;
  623. atomic64_set((atomic64_t *)data, 0);
  624. return 0;
  625. }
  626. static int zip_debugfs_atomic64_get(void *data, u64 *val)
  627. {
  628. *val = atomic64_read((atomic64_t *)data);
  629. return 0;
  630. }
  631. DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
  632. zip_debugfs_atomic64_set, "%llu\n");
/* seq_file show callback: dump the regset attached to this debugfs node. */
static int hisi_zip_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
  639. static int hisi_zip_core_debug_init(struct hisi_qm *qm)
  640. {
  641. u32 zip_core_num, zip_comp_core_num;
  642. struct device *dev = &qm->pdev->dev;
  643. struct debugfs_regset32 *regset;
  644. struct dentry *tmp_d;
  645. char buf[HZIP_BUF_SIZE];
  646. int i;
  647. zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
  648. zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
  649. qm->cap_ver);
  650. for (i = 0; i < zip_core_num; i++) {
  651. if (i < zip_comp_core_num)
  652. scnprintf(buf, sizeof(buf), "comp_core%d", i);
  653. else
  654. scnprintf(buf, sizeof(buf), "decomp_core%d",
  655. i - zip_comp_core_num);
  656. regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
  657. if (!regset)
  658. return -ENOENT;
  659. regset->regs = hzip_dfx_regs;
  660. regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
  661. regset->base = qm->io_base + core_offsets[i];
  662. regset->dev = dev;
  663. tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
  664. debugfs_create_file("regs", 0444, tmp_d, regset,
  665. &hisi_zip_regs_fops);
  666. }
  667. return 0;
  668. }
/* Create the "zip_dfx" directory with one file per software counter. */
static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs;
	struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
	struct hisi_zip_dfx *dfx = &zip->dfx;
	struct dentry *tmp_dir;
	void *data;
	int i;

	tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root);
	for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
		/* Each file exposes one atomic64 field of struct hisi_zip_dfx. */
		data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
		debugfs_create_file(zip_dfx_files[i].name,
				    0644, tmp_dir, data,
				    &zip_atomic64_ops);
	}

	/* "diff_regs" is PF-only and requires diff-regs init to have succeeded. */
	if (qm->fun_type == QM_HW_PF && hzip_regs)
		debugfs_create_file("diff_regs", 0444, tmp_dir,
				    qm, &hzip_diff_regs_fops);
}
/* Create the per-PF ctrl debugfs files, then the per-core register dumps. */
static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
{
	struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
	int i;

	for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) {
		spin_lock_init(&zip->ctrl->files[i].lock);
		zip->ctrl->files[i].ctrl = zip->ctrl;
		zip->ctrl->files[i].index = i;

		debugfs_create_file(ctrl_debug_file_name[i], 0600,
				    qm->debug.debug_root,
				    zip->ctrl->files + i,
				    &ctrl_debug_fops);
	}

	return hisi_zip_core_debug_init(qm);
}
/*
 * Create this device's debugfs hierarchy under the module-wide
 * "hisi_zip" root: QM diff registers, generic QM debug files, the
 * controller files (PF only) and the DFX statistic counters.
 *
 * Returns 0 on success or a negative errno from the QM/ctrl helpers.
 */
static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct dentry *dev_d;
	int ret;

	dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);

	qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
	qm->debug.debug_root = dev_d;
	ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init ZIP diff regs!\n");
		goto debugfs_remove;
	}

	hisi_qm_debug_init(qm);

	/* Controller debug files exist only on the physical function. */
	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_zip_ctrl_debug_init(qm);
		if (ret)
			goto failed_to_create;
	}

	hisi_zip_dfx_debug_init(qm);

	return 0;

failed_to_create:
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
debugfs_remove:
	/*
	 * NOTE(review): this removes the module-wide hzip_debugfs_root
	 * rather than only this device's dev_d subdirectory, so one
	 * failing device tears down debugfs for every hisi_zip device.
	 * Confirm whether removing just dev_d was intended.
	 */
	debugfs_remove_recursive(hzip_debugfs_root);
	return ret;
}
  731. /* hisi_zip_debug_regs_clear() - clear the zip debug regs */
  732. static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
  733. {
  734. int i, j;
  735. /* enable register read_clear bit */
  736. writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
  737. for (i = 0; i < ARRAY_SIZE(core_offsets); i++)
  738. for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++)
  739. readl(qm->io_base + core_offsets[i] +
  740. hzip_dfx_regs[j].offset);
  741. /* disable register read_clear bit */
  742. writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
  743. hisi_qm_debug_regs_clear(qm);
  744. }
  745. static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
  746. {
  747. hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs));
  748. debugfs_remove_recursive(qm->debug.debug_root);
  749. if (qm->fun_type == QM_HW_PF) {
  750. hisi_zip_debug_regs_clear(qm);
  751. qm->debug.curr_qm_qp_num = 0;
  752. }
  753. }
  754. static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
  755. {
  756. int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
  757. int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
  758. struct qm_debug *debug = &qm->debug;
  759. void __iomem *io_base;
  760. u32 zip_core_num;
  761. int i, j, idx;
  762. zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
  763. debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
  764. sizeof(unsigned int), GFP_KERNEL);
  765. if (!debug->last_words)
  766. return -ENOMEM;
  767. for (i = 0; i < com_dfx_regs_num; i++) {
  768. io_base = qm->io_base + hzip_com_dfx_regs[i].offset;
  769. debug->last_words[i] = readl_relaxed(io_base);
  770. }
  771. for (i = 0; i < zip_core_num; i++) {
  772. io_base = qm->io_base + core_offsets[i];
  773. for (j = 0; j < core_dfx_regs_num; j++) {
  774. idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
  775. debug->last_words[idx] = readl_relaxed(
  776. io_base + hzip_dump_dfx_regs[j].offset);
  777. }
  778. }
  779. return 0;
  780. }
  781. static void hisi_zip_show_last_regs_uninit(struct hisi_qm *qm)
  782. {
  783. struct qm_debug *debug = &qm->debug;
  784. if (qm->fun_type == QM_HW_VF || !debug->last_words)
  785. return;
  786. kfree(debug->last_words);
  787. debug->last_words = NULL;
  788. }
/*
 * Compare the current common and per-core DFX register values against
 * the snapshot taken by hisi_zip_show_last_regs_init() and log every
 * register whose value changed. Used to report what happened around a
 * controller reset. No-op on a VF or when no snapshot exists.
 */
static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
{
	int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
	u32 zip_core_num, zip_comp_core_num;
	struct qm_debug *debug = &qm->debug;
	char buf[HZIP_BUF_SIZE];
	void __iomem *base;
	int i, j, idx;
	u32 val;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* Common registers: log old => new for any that differ. */
	for (i = 0; i < com_dfx_regs_num; i++) {
		val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset);
		if (debug->last_words[i] != val)
			pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n",
				 hzip_com_dfx_regs[i].name,
				 debug->last_words[i], val);
	}

	zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
					   ZIP_CORE_NUM_CAP, qm->cap_ver);
	zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
						ZIP_CLUSTER_COMP_NUM_CAP,
						qm->cap_ver);

	/*
	 * Cores [0, zip_comp_core_num) are compression cores; the rest
	 * are decompression cores, numbered from zero within their group.
	 */
	for (i = 0; i < zip_core_num; i++) {
		if (i < zip_comp_core_num)
			scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
		else
			scnprintf(buf, sizeof(buf), "Decomp_core-%d",
				  i - zip_comp_core_num);
		base = qm->io_base + core_offsets[i];

		pci_info(qm->pdev, "==>%s:\n", buf);
		/* dump last word for dfx regs during control resetting */
		for (j = 0; j < core_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
			val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset);
			if (debug->last_words[idx] != val)
				pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
					 hzip_dump_dfx_regs[j].name,
					 debug->last_words[idx], val);
		}
	}
}
  829. static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
  830. {
  831. const struct hisi_zip_hw_error *err = zip_hw_error;
  832. struct device *dev = &qm->pdev->dev;
  833. u32 err_val;
  834. while (err->msg) {
  835. if (err->int_msk & err_sts) {
  836. dev_err(dev, "%s [error status=0x%x] found\n",
  837. err->msg, err->int_msk);
  838. if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
  839. err_val = readl(qm->io_base +
  840. HZIP_CORE_SRAM_ECC_ERR_INFO);
  841. dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
  842. ((err_val >>
  843. HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
  844. }
  845. }
  846. err++;
  847. }
  848. }
/* Return the current device error status word. */
static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}
  853. static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
  854. {
  855. u32 nfe;
  856. writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
  857. nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
  858. writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
  859. }
  860. static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
  861. {
  862. u32 val;
  863. val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
  864. writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE,
  865. qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
  866. writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
  867. qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
  868. }
/*
 * Mask multi-bit ECC reporting, then deliberately inject an ECC
 * multi-bit error; the ordering of the two writes matters.
 */
static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
{
	u32 nfe_enb;

	/* Disable ECC Mbit error report. */
	nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
	writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	/* Inject zip ECC Mbit error to block master ooo. */
	writel(HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_SET);
}
  880. static void hisi_zip_err_info_init(struct hisi_qm *qm)
  881. {
  882. struct hisi_qm_err_info *err_info = &qm->err_info;
  883. err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
  884. err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
  885. err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
  886. ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
  887. err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
  888. err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
  889. ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
  890. err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
  891. ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
  892. err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
  893. ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
  894. err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
  895. ZIP_RESET_MASK_CAP, qm->cap_ver);
  896. err_info->msi_wr_port = HZIP_WR_PORT;
  897. err_info->acpi_rst = "ZRST";
  898. }
/*
 * Error-handling callbacks invoked by the common QM layer for the ZIP
 * device: hardware init, error status query/clear/log, AXI master
 * out-of-order open/close, SVA prefetch control, last-words dump and
 * error-info setup.
 */
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
	.hw_init = hisi_zip_set_user_domain_and_cache,
	.hw_err_enable = hisi_zip_hw_error_enable,
	.hw_err_disable = hisi_zip_hw_error_disable,
	.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
	.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
	.log_dev_hw_err = hisi_zip_log_hw_error,
	.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
	.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
	.open_sva_prefetch = hisi_zip_open_sva_prefetch,
	.close_sva_prefetch = hisi_zip_close_sva_prefetch,
	.show_last_dfx_regs = hisi_zip_show_last_dfx_regs,
	.err_info_init = hisi_zip_err_info_init,
};
/*
 * PF-only probe setup: allocate the controller state, install the
 * error-handling callbacks, program the user domain/cache, enable SVA
 * prefetch and device error reporting, clear the debug counters and
 * snapshot the DFX registers.
 *
 * Returns 0 on success or a negative errno; a last-regs snapshot
 * failure is logged and propagated.
 */
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	struct hisi_zip_ctrl *ctrl;
	int ret;

	/* Device-managed allocation: freed automatically on detach. */
	ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	hisi_zip->ctrl = ctrl;
	ctrl->hisi_zip = hisi_zip;
	qm->err_ini = &hisi_zip_err_ini;
	qm->err_ini->err_info_init(qm);

	ret = hisi_zip_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hisi_zip_open_sva_prefetch(qm);
	hisi_qm_dev_err_init(qm);
	hisi_zip_debug_regs_clear(qm);

	ret = hisi_zip_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}
/*
 * Fill in the QM configuration (device identity, SQE size, queue
 * layout for PF/VF) and run the generic QM init, then register the
 * supported compression algorithms.
 *
 * Returns 0 on success; if algorithm setup fails the QM is
 * uninitialized again and the error is returned.
 */
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->mode = uacce_mode;
	qm->sqe_size = HZIP_SQE_SIZE;
	qm->dev_name = hisi_zip_name;

	/* The PCI device ID distinguishes the PF from a VF. */
	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_ZIP_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HZIP_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &zip_devices;
		/* Record that pf_q_num was supplied as a module parameter. */
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * have no way to get qm configure in VM in v1 hardware,
		 * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force
		 * to trigger only one VF in v1 hardware.
		 *
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = HZIP_PF_DEF_Q_NUM;
		qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to init zip qm configures!\n");
		return ret;
	}

	ret = hisi_zip_set_qm_algs(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to set zip algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}
/* Counterpart of hisi_zip_qm_init(): release the generic QM resources. */
static void hisi_zip_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}
  980. static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
  981. {
  982. u32 type_rate = HZIP_SHAPER_RATE_COMPRESS;
  983. struct hisi_qm *qm = &hisi_zip->qm;
  984. int ret;
  985. if (qm->fun_type == QM_HW_PF) {
  986. ret = hisi_zip_pf_probe_init(hisi_zip);
  987. if (ret)
  988. return ret;
  989. /* enable shaper type 0 */
  990. if (qm->ver >= QM_HW_V3) {
  991. type_rate |= QM_SHAPER_ENABLE;
  992. /* ZIP need to enable shaper type 1 */
  993. type_rate |= HZIP_SHAPER_RATE_DECOMPRESS << QM_SHAPER_TYPE1_OFFSET;
  994. qm->type_rate = type_rate;
  995. }
  996. }
  997. return 0;
  998. }
/*
 * Main probe entry: allocate the device context, initialize the QM,
 * run PF/VF specific setup, start the QM, create debugfs, register
 * the crypto algorithms and optional uacce interface, then enable
 * SR-IOV when requested via the vfs_num module parameter.
 *
 * Error paths unwind the completed steps in reverse order through the
 * goto chain below.
 */
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_zip *hisi_zip;
	struct hisi_qm *qm;
	int ret;

	hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
	if (!hisi_zip)
		return -ENOMEM;

	qm = &hisi_zip->qm;

	ret = hisi_zip_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret);
		return ret;
	}

	ret = hisi_zip_probe_init(hisi_zip);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_dev_err_uninit;

	/* A debugfs failure is logged but deliberately not fatal. */
	ret = hisi_zip_debugfs_init(qm);
	if (ret)
		pci_err(pdev, "failed to init debugfs (%d)!\n", ret);

	ret = hisi_qm_alg_register(qm, &zip_devices);
	if (ret < 0) {
		pci_err(pdev, "failed to register driver to crypto!\n");
		goto err_qm_stop;
	}

	/* Register the user-space (uacce) interface only if one was set up. */
	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_qm_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_qm_alg_unregister;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_qm_alg_unregister:
	hisi_qm_alg_unregister(qm, &zip_devices);

err_qm_stop:
	hisi_zip_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_dev_err_uninit:
	hisi_zip_show_last_regs_uninit(qm);
	hisi_qm_dev_err_uninit(qm);

err_qm_uninit:
	hisi_zip_qm_uninit(qm);

	return ret;
}
/*
 * Device removal: disable runtime PM, wait for in-flight tasks,
 * unregister the algorithms, disable SR-IOV if enabled, then tear
 * down debugfs, stop the QM and release error handling and QM
 * resources — the reverse of probe.
 */
static void hisi_zip_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &zip_devices);
	hisi_qm_alg_unregister(qm, &zip_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	hisi_zip_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
	hisi_zip_show_last_regs_uninit(qm);
	hisi_qm_dev_err_uninit(qm);
	hisi_zip_qm_uninit(qm);
}
/* Runtime PM hooks, delegated to the shared QM suspend/resume helpers. */
static const struct dev_pm_ops hisi_zip_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};
/* PCI error/reset callbacks, all implemented by the common QM layer. */
static const struct pci_error_handlers hisi_zip_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};
/*
 * PCI driver definition for the ZIP accelerator; SR-IOV configuration
 * is only wired up when CONFIG_PCI_IOV is enabled.
 */
static struct pci_driver hisi_zip_pci_driver = {
	.name = "hisi_zip",
	.id_table = hisi_zip_dev_ids,
	.probe = hisi_zip_probe,
	.remove = hisi_zip_remove,
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
			hisi_qm_sriov_configure : NULL,
	.err_handler = &hisi_zip_err_handler,
	.shutdown = hisi_qm_dev_shutdown,
	.driver.pm = &hisi_zip_pm_ops,
};
/*
 * hisi_zip_get_pf_driver() - expose the ZIP PF pci_driver so other
 * kernel modules can reference it.
 */
struct pci_driver *hisi_zip_get_pf_driver(void)
{
	return &hisi_zip_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_zip_get_pf_driver);
  1094. static void hisi_zip_register_debugfs(void)
  1095. {
  1096. if (!debugfs_initialized())
  1097. return;
  1098. hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL);
  1099. }
/* Remove the module-wide "hisi_zip" debugfs tree. */
static void hisi_zip_unregister_debugfs(void)
{
	debugfs_remove_recursive(hzip_debugfs_root);
}
  1104. static int __init hisi_zip_init(void)
  1105. {
  1106. int ret;
  1107. hisi_qm_init_list(&zip_devices);
  1108. hisi_zip_register_debugfs();
  1109. ret = pci_register_driver(&hisi_zip_pci_driver);
  1110. if (ret < 0) {
  1111. hisi_zip_unregister_debugfs();
  1112. pr_err("Failed to register pci driver.\n");
  1113. }
  1114. return ret;
  1115. }
/* Module exit: unregister the PCI driver, then tear down debugfs. */
static void __exit hisi_zip_exit(void)
{
	pci_unregister_driver(&hisi_zip_pci_driver);
	hisi_zip_unregister_debugfs();
}
/* Module entry/exit points and metadata. */
module_init(hisi_zip_init);
module_exit(hisi_zip_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <[email protected]>");
MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");