  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (C) 2020 Marvell. */
  3. #include "otx2_cpt_common.h"
  4. #include "otx2_cptlf.h"
  5. #include "rvu_reg.h"
  6. #define CPT_TIMER_HOLD 0x03F
  7. #define CPT_COUNT_HOLD 32
  8. static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
  9. int time_wait)
  10. {
  11. union otx2_cptx_lf_done_wait done_wait;
  12. done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  13. OTX2_CPT_LF_DONE_WAIT);
  14. done_wait.s.time_wait = time_wait;
  15. otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  16. OTX2_CPT_LF_DONE_WAIT, done_wait.u);
  17. }
  18. static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
  19. {
  20. union otx2_cptx_lf_done_wait done_wait;
  21. done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  22. OTX2_CPT_LF_DONE_WAIT);
  23. done_wait.s.num_wait = num_wait;
  24. otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  25. OTX2_CPT_LF_DONE_WAIT, done_wait.u);
  26. }
  27. static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
  28. int time_wait)
  29. {
  30. int slot;
  31. for (slot = 0; slot < lfs->lfs_num; slot++)
  32. cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
  33. }
  34. static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
  35. {
  36. int slot;
  37. for (slot = 0; slot < lfs->lfs_num; slot++)
  38. cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
  39. }
  40. static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
  41. {
  42. struct otx2_cptlfs_info *lfs = lf->lfs;
  43. union otx2_cptx_af_lf_ctrl lf_ctrl;
  44. int ret;
  45. ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
  46. CPT_AF_LFX_CTL(lf->slot),
  47. &lf_ctrl.u, lfs->blkaddr);
  48. if (ret)
  49. return ret;
  50. lf_ctrl.s.pri = pri ? 1 : 0;
  51. ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
  52. CPT_AF_LFX_CTL(lf->slot),
  53. lf_ctrl.u, lfs->blkaddr);
  54. return ret;
  55. }
  56. static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
  57. int eng_grps_mask)
  58. {
  59. struct otx2_cptlfs_info *lfs = lf->lfs;
  60. union otx2_cptx_af_lf_ctrl lf_ctrl;
  61. int ret;
  62. ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
  63. CPT_AF_LFX_CTL(lf->slot),
  64. &lf_ctrl.u, lfs->blkaddr);
  65. if (ret)
  66. return ret;
  67. lf_ctrl.s.grp = eng_grps_mask;
  68. ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
  69. CPT_AF_LFX_CTL(lf->slot),
  70. lf_ctrl.u, lfs->blkaddr);
  71. return ret;
  72. }
  73. static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
  74. int eng_grp_mask, int pri)
  75. {
  76. int slot, ret = 0;
  77. for (slot = 0; slot < lfs->lfs_num; slot++) {
  78. ret = cptlf_set_pri(&lfs->lf[slot], pri);
  79. if (ret)
  80. return ret;
  81. ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
  82. if (ret)
  83. return ret;
  84. }
  85. return ret;
  86. }
/*
 * One-shot hardware setup of all attached LFs: queues are disabled first
 * and re-enabled last, after their base addresses, sizes and the DONE
 * interrupt coalescing parameters have been programmed.
 */
static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
	/* Disable instruction queues */
	otx2_cptlf_disable_iqueues(lfs);

	/* Set instruction queues base addresses */
	otx2_cptlf_set_iqueues_base_addr(lfs);

	/* Set instruction queues sizes */
	otx2_cptlf_set_iqueues_size(lfs);

	/* Set done interrupts time wait */
	cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);

	/* Set done interrupts num wait */
	cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);

	/* Enable instruction queues */
	otx2_cptlf_enable_iqueues(lfs);
}
/* Hardware-side teardown counterpart of cptlf_hw_init(). */
static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
{
	/* Disable instruction queues */
	otx2_cptlf_disable_iqueues(lfs);
}
  107. static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
  108. {
  109. union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
  110. u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
  111. OTX2_CPT_LF_MISC_INT_ENA_W1C;
  112. int slot;
  113. irq_misc.s.fault = 0x1;
  114. irq_misc.s.hwerr = 0x1;
  115. irq_misc.s.irde = 0x1;
  116. irq_misc.s.nqerr = 0x1;
  117. irq_misc.s.nwrp = 0x1;
  118. for (slot = 0; slot < lfs->lfs_num; slot++)
  119. otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
  120. irq_misc.u);
  121. }
  122. static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
  123. {
  124. int slot;
  125. /* Enable done interrupts */
  126. for (slot = 0; slot < lfs->lfs_num; slot++)
  127. otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
  128. OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
  129. /* Enable Misc interrupts */
  130. cptlf_set_misc_intrs(lfs, true);
  131. }
  132. static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
  133. {
  134. int slot;
  135. for (slot = 0; slot < lfs->lfs_num; slot++)
  136. otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
  137. OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
  138. cptlf_set_misc_intrs(lfs, false);
  139. }
  140. static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
  141. {
  142. union otx2_cptx_lf_done irq_cnt;
  143. irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  144. OTX2_CPT_LF_DONE);
  145. return irq_cnt.s.done;
  146. }
  147. static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
  148. {
  149. union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
  150. struct otx2_cptlf_info *lf = arg;
  151. struct device *dev;
  152. dev = &lf->lfs->pdev->dev;
  153. irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  154. OTX2_CPT_LF_MISC_INT);
  155. irq_misc_ack.u = 0x0;
  156. if (irq_misc.s.fault) {
  157. dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
  158. lf->slot);
  159. irq_misc_ack.s.fault = 0x1;
  160. } else if (irq_misc.s.hwerr) {
  161. dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.",
  162. lf->slot);
  163. irq_misc_ack.s.hwerr = 0x1;
  164. } else if (irq_misc.s.nwrp) {
  165. dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
  166. lf->slot);
  167. irq_misc_ack.s.nwrp = 0x1;
  168. } else if (irq_misc.s.irde) {
  169. dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
  170. irq_misc_ack.s.irde = 0x1;
  171. } else if (irq_misc.s.nqerr) {
  172. dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
  173. irq_misc_ack.s.nqerr = 0x1;
  174. } else {
  175. dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
  176. return IRQ_NONE;
  177. }
  178. /* Acknowledge interrupts */
  179. otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  180. OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);
  181. return IRQ_HANDLED;
  182. }
/*
 * DONE interrupt handler: acknowledges completed requests and schedules
 * the LF's tasklet to process them.
 *
 * NOTE(review): DONE_WAIT is read before the ACK write and written back
 * unchanged afterwards — presumably this re-arms the coalescing timer;
 * confirm against the CPT hardware reference manual.
 */
static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
{
	union otx2_cptx_lf_done_wait done_wait;
	struct otx2_cptlf_info *lf = arg;
	int irq_cnt;

	/* Read the number of completed requests */
	irq_cnt = cptlf_read_done_cnt(lf);
	if (irq_cnt) {
		done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
					      lf->slot, OTX2_CPT_LF_DONE_WAIT);

		/* Acknowledge the number of completed requests */
		otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				 OTX2_CPT_LF_DONE_ACK, irq_cnt);

		otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				 OTX2_CPT_LF_DONE_WAIT, done_wait.u);

		/* No tasklet registered for this LF: nothing can consume the work */
		if (unlikely(!lf->wqe)) {
			dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
				lf->slot);
			return IRQ_NONE;
		}

		/* Schedule processing of completed requests */
		tasklet_hi_schedule(&lf->wqe->work);
	}
	return IRQ_HANDLED;
}
  208. void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
  209. {
  210. int i, offs, vector;
  211. for (i = 0; i < lfs->lfs_num; i++) {
  212. for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
  213. if (!lfs->lf[i].is_irq_reg[offs])
  214. continue;
  215. vector = pci_irq_vector(lfs->pdev,
  216. lfs->lf[i].msix_offset + offs);
  217. free_irq(vector, &lfs->lf[i]);
  218. lfs->lf[i].is_irq_reg[offs] = false;
  219. }
  220. }
  221. cptlf_disable_intrs(lfs);
  222. }
  223. EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts,
  224. CRYPTO_DEV_OCTEONTX2_CPT);
  225. static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
  226. int lf_num, int irq_offset,
  227. irq_handler_t handler)
  228. {
  229. int ret, vector;
  230. vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
  231. irq_offset);
  232. ret = request_irq(vector, handler, 0,
  233. lfs->lf[lf_num].irq_name[irq_offset],
  234. &lfs->lf[lf_num]);
  235. if (ret)
  236. return ret;
  237. lfs->lf[lf_num].is_irq_reg[irq_offset] = true;
  238. return ret;
  239. }
  240. int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
  241. {
  242. int irq_offs, ret, i;
  243. for (i = 0; i < lfs->lfs_num; i++) {
  244. irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
  245. snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
  246. ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
  247. cptlf_misc_intr_handler);
  248. if (ret)
  249. goto free_irq;
  250. irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
  251. snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
  252. i);
  253. ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
  254. cptlf_done_intr_handler);
  255. if (ret)
  256. goto free_irq;
  257. }
  258. cptlf_enable_intrs(lfs);
  259. return 0;
  260. free_irq:
  261. otx2_cptlf_unregister_interrupts(lfs);
  262. return ret;
  263. }
  264. EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT);
  265. void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
  266. {
  267. int slot, offs;
  268. for (slot = 0; slot < lfs->lfs_num; slot++) {
  269. for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
  270. irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
  271. lfs->lf[slot].msix_offset +
  272. offs), NULL);
  273. free_cpumask_var(lfs->lf[slot].affinity_mask);
  274. }
  275. }
  276. EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
  277. int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
  278. {
  279. struct otx2_cptlf_info *lf = lfs->lf;
  280. int slot, offs, ret;
  281. for (slot = 0; slot < lfs->lfs_num; slot++) {
  282. if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
  283. dev_err(&lfs->pdev->dev,
  284. "cpumask allocation failed for LF %d", slot);
  285. ret = -ENOMEM;
  286. goto free_affinity_mask;
  287. }
  288. cpumask_set_cpu(cpumask_local_spread(slot,
  289. dev_to_node(&lfs->pdev->dev)),
  290. lf[slot].affinity_mask);
  291. for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
  292. ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
  293. lf[slot].msix_offset + offs),
  294. lf[slot].affinity_mask);
  295. if (ret)
  296. goto free_affinity_mask;
  297. }
  298. }
  299. return 0;
  300. free_affinity_mask:
  301. otx2_cptlf_free_irqs_affinity(lfs);
  302. return ret;
  303. }
  304. EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
/**
 * otx2_cptlf_init() - attach and initialize CPT local functions (LFs)
 * @lfs: LFs context; pdev, reg_base and mbox must already be set up
 * @eng_grp_mask: engine groups each LF is allowed to submit to
 * @pri: queue priority applied to all LFs (non-zero means high)
 * @lfs_num: number of LFs to attach
 *
 * Attaches @lfs_num LFs via the AF mailbox, allocates their instruction
 * queues, performs the hardware queue setup and programs per-LF engine
 * group mask and priority.
 *
 * Return: 0 on success, negative error code otherwise. On failure all
 * partially acquired resources are released and lfs->lfs_num is reset
 * to 0 (cleanup runs in reverse acquisition order via the goto ladder).
 */
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
		    int lfs_num)
{
	int slot, ret;

	if (!lfs->pdev || !lfs->reg_base)
		return -EINVAL;

	lfs->lfs_num = lfs_num;
	for (slot = 0; slot < lfs->lfs_num; slot++) {
		lfs->lf[slot].lfs = lfs;
		lfs->lf[slot].slot = slot;
		/* LMT line: dedicated region when available, else RVU window */
		if (lfs->lmt_base)
			lfs->lf[slot].lmtline = lfs->lmt_base +
						(slot * LMTLINE_SIZE);
		else
			lfs->lf[slot].lmtline = lfs->reg_base +
				OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
						 OTX2_CPT_LMT_LF_LMTLINEX(0));

		lfs->lf[slot].ioreg = lfs->reg_base +
			OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
						 OTX2_CPT_LF_NQX(0));
	}
	/* Send request to attach LFs */
	ret = otx2_cpt_attach_rscrs_msg(lfs);
	if (ret)
		goto clear_lfs_num;

	ret = otx2_cpt_alloc_instruction_queues(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Allocating instruction queues failed\n");
		goto detach_rsrcs;
	}
	cptlf_hw_init(lfs);
	/*
	 * Allow each LF to execute requests destined to any of 8 engine
	 * groups and set queue priority of each LF to high
	 */
	ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
	if (ret)
		goto free_iq;

	return 0;

free_iq:
	otx2_cpt_free_instruction_queues(lfs);
	cptlf_hw_cleanup(lfs);
detach_rsrcs:
	otx2_cpt_detach_rsrcs_msg(lfs);
clear_lfs_num:
	lfs->lfs_num = 0;
	return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
/*
 * otx2_cptlf_shutdown() - undo otx2_cptlf_init(): quiesce the LFs'
 * hardware and detach them via the AF mailbox. lfs_num is cleared first
 * so the context no longer advertises usable LFs during teardown.
 */
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
	lfs->lfs_num = 0;
	/* Cleanup LFs hardware side */
	cptlf_hw_cleanup(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
  364. MODULE_AUTHOR("Marvell");
  365. MODULE_DESCRIPTION("Marvell RVU CPT Common module");
  366. MODULE_LICENSE("GPL");