/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef HISI_ACC_QM_H
#define HISI_ACC_QM_H

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>

/* Max queue number per function, by hardware version (see q_num_set()) */
#define QM_QNUM_V1			4096
#define QM_QNUM_V2			1024
#define QM_MAX_VFS_NUM_V2		63

/* qm user domain */
#define QM_ARUSER_M_CFG_1		0x100088
#define AXUSER_SNOOP_ENABLE		BIT(30)
#define AXUSER_CMD_TYPE			GENMASK(14, 12)
#define AXUSER_CMD_SMMU_NORMAL		1
#define AXUSER_NS			BIT(6)
#define AXUSER_NO			BIT(5)
#define AXUSER_FP			BIT(4)
#define AXUSER_SSV			BIT(0)
#define AXUSER_BASE			(AXUSER_SNOOP_ENABLE |		\
					FIELD_PREP(AXUSER_CMD_TYPE,	\
					AXUSER_CMD_SMMU_NORMAL) |	\
					AXUSER_NS | AXUSER_NO | AXUSER_FP)
#define QM_ARUSER_M_CFG_ENABLE		0x100090
#define ARUSER_M_CFG_ENABLE		0xfffffffe
#define QM_AWUSER_M_CFG_1		0x100098
#define QM_AWUSER_M_CFG_ENABLE		0x1000a0
#define AWUSER_M_CFG_ENABLE		0xfffffffe
#define QM_WUSER_M_CFG_ENABLE		0x1000a8
#define WUSER_M_CFG_ENABLE		0xffffffff

/* mailbox command opcodes (passed as @cmd to hisi_qm_mb()) */
#define QM_MB_CMD_SQC			0x0
#define QM_MB_CMD_CQC			0x1
#define QM_MB_CMD_EQC			0x2
#define QM_MB_CMD_AEQC			0x3
#define QM_MB_CMD_SQC_BT		0x4
#define QM_MB_CMD_CQC_BT		0x5
#define QM_MB_CMD_SQC_VFT_V2		0x6
#define QM_MB_CMD_STOP_QP		0x8
#define QM_MB_CMD_SRC			0xc
#define QM_MB_CMD_DST			0xd

/* mailbox register offsets and field shifts */
#define QM_MB_CMD_SEND_BASE		0x300
#define QM_MB_EVENT_SHIFT		8
#define QM_MB_BUSY_SHIFT		13
#define QM_MB_OP_SHIFT			14
#define QM_MB_CMD_DATA_ADDR_L		0x304
#define QM_MB_CMD_DATA_ADDR_H		0x308
#define QM_MB_MAX_WAIT_CNT		6000

/* doorbell */
#define QM_DOORBELL_CMD_SQ		0
#define QM_DOORBELL_CMD_CQ		1
#define QM_DOORBELL_CMD_EQ		2
#define QM_DOORBELL_CMD_AEQ		3
#define QM_DOORBELL_SQ_CQ_BASE_V2	0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2	0x2000
#define QM_QP_MAX_NUM_SHIFT		11
#define QM_DB_CMD_SHIFT_V2		12
#define QM_DB_RAND_SHIFT_V2		16
#define QM_DB_INDEX_SHIFT_V2		32
#define QM_DB_PRIORITY_SHIFT_V2		48
#define QM_VF_STATE			0x60

/* qm cache */
#define QM_CACHE_CTL			0x100050
#define SQC_CACHE_ENABLE		BIT(0)
#define CQC_CACHE_ENABLE		BIT(1)
#define SQC_CACHE_WB_ENABLE		BIT(4)
#define SQC_CACHE_WB_THRD		GENMASK(10, 5)
#define CQC_CACHE_WB_ENABLE		BIT(11)
#define CQC_CACHE_WB_THRD		GENMASK(17, 12)

/* AXI master configuration */
#define QM_AXI_M_CFG			0x1000ac
#define AXI_M_CFG			0xffff
#define QM_AXI_M_CFG_ENABLE		0x1000b0
#define AM_CFG_SINGLE_PORT_MAX_TRANS	0x300014
#define AXI_M_CFG_ENABLE		0xffffffff
#define QM_PEH_AXUSER_CFG		0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE	0x1000d0
#define PEH_AXUSER_CFG			0x401001
#define PEH_AXUSER_CFG_ENABLE		0xffffffff

#define QM_MIN_QNUM			2
#define HISI_ACC_SGL_SGE_NR_MAX		255

/* QoS shaper */
#define QM_SHAPER_CFG			0x100164
#define QM_SHAPER_ENABLE		BIT(30)
#define QM_SHAPER_TYPE1_OFFSET		10

/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR		1

/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE		0 /* don't use uacce */
#define UACCE_MODE_SVA			1 /* use uacce sva mode */
#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce"
/* Reason passed to hisi_qm_stop(). */
enum qm_stop_reason {
	QM_NORMAL,
	QM_SOFT_RESET,
	QM_FLR,
};

/* Lifecycle states of a QM instance. */
enum qm_state {
	QM_INIT = 0,
	QM_START,
	QM_CLOSE,
	QM_STOP,
};

/* Lifecycle states of a queue pair. */
enum qp_state {
	QP_INIT = 1,
	QP_START,
	QP_STOP,
	QP_CLOSE,
};

/* Hardware versions, matched against the PCI revision ID (see q_num_set()). */
enum qm_hw_ver {
	QM_HW_UNKNOWN = -1,
	QM_HW_V1 = 0x20,
	QM_HW_V2 = 0x21,
	QM_HW_V3 = 0x30,
};

/* Whether this QM instance is a physical or a virtual function. */
enum qm_fun_type {
	QM_HW_PF,
	QM_HW_VF,
};

/* Indices of the entries in qm_debug.files[]. */
enum qm_debug_file {
	CURRENT_QM,
	CURRENT_Q,
	CLEAR_ENABLE,
	DEBUG_FILE_NUM,	/* entry count, must stay last */
};

/* VF readiness state (presumably kept at the QM_VF_STATE offset — confirm). */
enum qm_vf_state {
	QM_READY = 0,
	QM_NOT_READY,
};

/* Bit positions for hisi_qm.misc_ctl (driver-removing / reset-sched flags). */
enum qm_misc_ctl_bits {
	QM_DRIVER_REMOVING = 0x0,
	QM_RST_SCHED,
	QM_RESETTING,
	QM_MODULE_PARAM,
};

/* Bit positions for hisi_qm.caps (device capability bits). */
enum qm_cap_bits {
	QM_SUPPORT_DB_ISOLATION = 0x0,
	QM_SUPPORT_FUNC_QOS,
	QM_SUPPORT_STOP_QP,
	QM_SUPPORT_MB_COMMAND,
	QM_SUPPORT_SVA_PREFETCH,
	QM_SUPPORT_RPM,
};
/* A run of registers to dump for diffing: @reg_len entries from @reg_offset. */
struct dfx_diff_registers {
	u32 *regs;
	u32 reg_offset;
	u32 reg_len;
};

/* Per-QM debugfs event counters. */
struct qm_dfx {
	atomic64_t err_irq_cnt;
	atomic64_t aeq_irq_cnt;
	atomic64_t abnormal_irq_cnt;
	atomic64_t create_qp_err_cnt;
	atomic64_t mb_err_cnt;
};

/* One debugfs file: its index plus a lock and back-pointer to the owner. */
struct debugfs_file {
	enum qm_debug_file index;
	struct mutex lock;
	struct qm_debug *debug;
};

/* All debugfs state of a QM instance. */
struct qm_debug {
	u32 curr_qm_qp_num;
	u32 sqe_mask_offset;
	u32 sqe_mask_len;
	struct qm_dfx dfx;
	struct dentry *debug_root;
	struct dentry *qm_d;
	struct debugfs_file files[DEBUG_FILE_NUM];
	unsigned int *qm_last_words;
	/* ACC engines recording last regs */
	unsigned int *last_words;
	struct dfx_diff_registers *qm_diff_regs;
	struct dfx_diff_registers *acc_diff_regs;
};

/* Per-function QoS shaper factors (cir_*/cbs_s look like rate/burst terms —
 * confirm semantics against the shaper configuration code).
 */
struct qm_shaper_factor {
	u32 func_qos;
	u64 cir_b;
	u64 cir_u;
	u64 cir_s;
	u64 cbs_s;
};

/* A DMA buffer: kernel virtual address, bus address and size. */
struct qm_dma {
	void *va;
	dma_addr_t dma;
	size_t size;
};

/* Runtime state of the event queue (eq) and asynchronous event queue (aeq). */
struct hisi_qm_status {
	u32 eq_head;
	bool eqc_phase;
	u32 aeq_head;
	bool aeqc_phase;
	atomic_t flags;
	int stop_reason;
};
struct hisi_qm;

/* Error-handling configuration supplied by the accelerator-specific driver. */
struct hisi_qm_err_info {
	char *acpi_rst;
	u32 msi_wr_port;
	u32 ecc_2bits_mask;
	u32 qm_shutdown_mask;
	u32 dev_shutdown_mask;
	u32 qm_reset_mask;
	u32 dev_reset_mask;
	u32 ce;
	u32 nfe;
	u32 fe;
};

/* Sticky ECC multi-bit error flags for the QM and the device. */
struct hisi_qm_err_status {
	u32 is_qm_ecc_mbit;
	u32 is_dev_ecc_mbit;
};

/* Device-specific error-handling callbacks implemented by each driver. */
struct hisi_qm_err_ini {
	int (*hw_init)(struct hisi_qm *qm);
	void (*hw_err_enable)(struct hisi_qm *qm);
	void (*hw_err_disable)(struct hisi_qm *qm);
	u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
	void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
	void (*open_axi_master_ooo)(struct hisi_qm *qm);
	void (*close_axi_master_ooo)(struct hisi_qm *qm);
	void (*open_sva_prefetch)(struct hisi_qm *qm);
	void (*close_sva_prefetch)(struct hisi_qm *qm);
	void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
	void (*show_last_dfx_regs)(struct hisi_qm *qm);
	void (*err_info_init)(struct hisi_qm *qm);
};

/* Where one capability is encoded, with per-hardware-version default values. */
struct hisi_qm_cap_info {
	u32 type;
	/* Register offset */
	u32 offset;
	/* Bit offset in register */
	u32 shift;
	u32 mask;
	u32 v1_val;
	u32 v2_val;
	u32 v3_val;
};

/* List of QM instances of one accelerator type plus crypto (un)register hooks. */
struct hisi_qm_list {
	struct mutex lock;
	struct list_head list;
	int (*register_to_crypto)(struct hisi_qm *qm);
	void (*unregister_from_crypto)(struct hisi_qm *qm);
};

/* Per-worker context for completion polling work. */
struct hisi_qm_poll_data {
	struct hisi_qm *qm;
	struct work_struct work;
	u16 *qp_finish_id;
};
/* Core state of one accelerator Queue Management (QM) function (PF or VF). */
struct hisi_qm {
	enum qm_hw_ver ver;
	enum qm_fun_type fun_type;
	const char *dev_name;
	struct pci_dev *pdev;
	void __iomem *io_base;
	void __iomem *db_io_base;
	/* Capability version, 0: not supports */
	u32 cap_ver;
	u32 sqe_size;
	u32 qp_base;
	u32 qp_num;
	u32 qp_in_used;
	u32 ctrl_qp_num;
	u32 max_qp_num;
	u32 vfs_num;
	u32 db_interval;
	u16 eq_depth;
	u16 aeq_depth;
	struct list_head list;
	struct hisi_qm_list *qm_list;
	/* DMA area — presumably backs the sqc/cqc/eqe/aeqe rings below; confirm
	 * against hisi_qm_init().
	 */
	struct qm_dma qdma;
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	struct qm_eqe *eqe;
	struct qm_aeqe *aeqe;
	dma_addr_t sqc_dma;
	dma_addr_t cqc_dma;
	dma_addr_t eqe_dma;
	dma_addr_t aeqe_dma;
	struct hisi_qm_status status;
	const struct hisi_qm_err_ini *err_ini;
	struct hisi_qm_err_info err_info;
	struct hisi_qm_err_status err_status;
	unsigned long misc_ctl; /* driver removing and reset sched */
	/* Device capability bit */
	unsigned long caps;
	struct rw_semaphore qps_lock;
	struct idr qp_idr;
	struct hisi_qp *qp_array;
	struct hisi_qm_poll_data *poll_data;
	struct mutex mailbox_lock;
	const struct hisi_qm_hw_ops *ops;
	struct qm_debug debug;
	u32 error_mask;
	struct workqueue_struct *wq;
	struct work_struct rst_work;
	struct work_struct cmd_process;
	const char *algs;
	bool use_sva;
	bool is_frozen;
	resource_size_t phys_base;
	resource_size_t db_phys_base;
	struct uacce_device *uacce;
	int mode;
	struct qm_shaper_factor *factor;
	u32 mb_qos;
	u32 type_rate;
};
/* Software-tracked state of one queue pair. */
struct hisi_qp_status {
	atomic_t used;
	u16 sq_tail;
	u16 cq_head;
	bool cqc_phase;
	atomic_t flags;
};

/* Optional hook for building a submission-queue element. */
struct hisi_qp_ops {
	int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};

/* One queue pair (submission queue + completion queue) on a QM function. */
struct hisi_qp {
	u32 qp_id;
	u16 sq_depth;
	u16 cq_depth;
	u8 alg_type;
	u8 req_type;
	struct qm_dma qdma;
	void *sqe;
	struct qm_cqe *cqe;
	dma_addr_t sqe_dma;
	dma_addr_t cqe_dma;
	struct hisi_qp_status qp_status;
	struct hisi_qp_ops *hw_ops;
	void *qp_ctx;
	/* Completion callback; @data is per-request — confirm against caller */
	void (*req_cb)(struct hisi_qp *qp, void *data);
	void (*event_cb)(struct hisi_qp *qp);
	struct hisi_qm *qm;
	bool is_resetting;
	bool is_in_kernel;
	u16 pasid;
	struct uacce_queue *uacce_q;
};
  338. static inline int q_num_set(const char *val, const struct kernel_param *kp,
  339. unsigned int device)
  340. {
  341. struct pci_dev *pdev;
  342. u32 n, q_num;
  343. int ret;
  344. if (!val)
  345. return -EINVAL;
  346. pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
  347. if (!pdev) {
  348. q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
  349. pr_info("No device found currently, suppose queue number is %u\n",
  350. q_num);
  351. } else {
  352. if (pdev->revision == QM_HW_V1)
  353. q_num = QM_QNUM_V1;
  354. else
  355. q_num = QM_QNUM_V2;
  356. pci_dev_put(pdev);
  357. }
  358. ret = kstrtou32(val, 10, &n);
  359. if (ret || n < QM_MIN_QNUM || n > q_num)
  360. return -EINVAL;
  361. return param_set_int(val, kp);
  362. }
  363. static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
  364. {
  365. u32 n;
  366. int ret;
  367. if (!val)
  368. return -EINVAL;
  369. ret = kstrtou32(val, 10, &n);
  370. if (ret < 0)
  371. return ret;
  372. if (n > QM_MAX_VFS_NUM_V2)
  373. return -EINVAL;
  374. return param_set_int(val, kp);
  375. }
  376. static inline int mode_set(const char *val, const struct kernel_param *kp)
  377. {
  378. u32 n;
  379. int ret;
  380. if (!val)
  381. return -EINVAL;
  382. ret = kstrtou32(val, 10, &n);
  383. if (ret != 0 || (n != UACCE_MODE_SVA &&
  384. n != UACCE_MODE_NOUACCE))
  385. return -EINVAL;
  386. return param_set_int(val, kp);
  387. }
/* Thin wrapper around mode_set() for use as a module_param setter. */
static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
	return mode_set(val, kp);
}
  392. static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
  393. {
  394. INIT_LIST_HEAD(&qm_list->list);
  395. mutex_init(&qm_list->lock);
  396. }
/* QM lifecycle */
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);

/* Queue-pair control and request submission */
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);

/* debugfs */
void hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);

/* SR-IOV */
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);

/* Device error handling, register dumping and reset */
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
			      struct dfx_diff_registers *dregs, u32 reg_len);
void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
				struct dfx_diff_registers *dregs, u32 regs_len);
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
void hisi_qm_reset_prepare(struct pci_dev *pdev);
void hisi_qm_reset_done(struct pci_dev *pdev);

/* Mailbox */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op);

/* Hardware scatter-gather list helpers */
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
						      struct scatterlist *sgl,
						      struct hisi_acc_sgl_pool *pool,
						      u32 index, dma_addr_t *hw_sgl_dma);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_hw_sgl *hw_sgl);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
						   u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
			    struct hisi_acc_sgl_pool *pool);

/* QP allocation and algorithm registration */
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
			   u8 alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);

/* Power management and DFX access */
int hisi_qm_resume(struct device *dev);
int hisi_qm_suspend(struct device *dev);
void hisi_qm_pm_uninit(struct hisi_qm *qm);
void hisi_qm_pm_init(struct hisi_qm *qm);
int hisi_qm_get_dfx_access(struct hisi_qm *qm);
void hisi_qm_put_dfx_access(struct hisi_qm *qm);
void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read);

/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
struct pci_driver *hisi_hpre_get_pf_driver(void);
struct pci_driver *hisi_zip_get_pf_driver(void);
#endif /* HISI_ACC_QM_H */