// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */
#include <linux/module.h>

#include "cptpf.h"

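/*
 * cpt_send_msg_to_vf() - post a message in the PF-to-VF mailbox
 * @cpt: CPT device
 * @vf:  VF to signal
 * @mbx: message/data pair to write
 *
 * The data word (mbox 1) is written first; the write to mbox 0 is
 * what raises the mailbox interrupt on the VF side.
 */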
static void cpt_send_msg_to_vf(struct cpt_device *cpt, int vf,
			       struct cpt_mbox *mbx)
{
	/* Writing mbox(0) causes interrupt */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1),
			mbx->data);
	cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0), mbx->msg);
}

/*
 * ACK a VF's mailbox message
 * @vf: VF to which the ACK is to be sent
 */
static void cpt_mbox_send_ack(struct cpt_device *cpt, int vf,
			      struct cpt_mbox *mbx)
{
	mbx->data = 0ull;
	mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
	cpt_send_msg_to_vf(cpt, vf, mbx);
}

static void cpt_clear_mbox_intr(struct cpt_device *cpt, u32 vf)
{
	/* W1C: write 1 to clear the interrupt bit for this VF */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0), (1ULL << vf));
}

/*
 * Configure QLEN/chunk sizes for VF
 */
static void cpt_cfg_qlen_for_vf(struct cpt_device *cpt, int vf, u32 size)
{
	union cptx_pf_qx_ctl pf_qx_ctl;

	pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf));
	pf_qx_ctl.s.size = size;
	pf_qx_ctl.s.cont_err = true;
	cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u);
}

/*
 * Configure VQ priority
 */
static void cpt_cfg_vq_priority(struct cpt_device *cpt, int vf, u32 pri)
{
	union cptx_pf_qx_ctl pf_qx_ctl;

	pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf));
	pf_qx_ctl.s.pri = pri;
	cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u);
}

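/*
 * Bind virtual queue @q to engine group @grp.
 *
 * Returns the engine type of the group's microcode (AE_TYPES or
 * SE_TYPES) on success, -EINVAL if the queue or group is out of range.
 */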
static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp)
{
	struct microcode *mcode = cpt->mcode;
	union cptx_pf_qx_ctl pf_qx_ctl;
	struct device *dev = &cpt->pdev->dev;

	if (q >= CPT_MAX_VF_NUM) {
		dev_err(dev, "Requested queue exceeds the maximum VF number\n");
		return -EINVAL;
	}
	if (grp >= CPT_MAX_CORE_GROUPS) {
		dev_err(dev, "Requested group exceeds the maximum number of core groups\n");
		return -EINVAL;
	}
	if (grp >= cpt->next_mc_idx) {
		dev_err(dev, "Requested group exceeds the number of available functional groups\n");
		return -EINVAL;
	}
	pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q));
	pf_qx_ctl.s.grp = mcode[grp].group;
	cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q), pf_qx_ctl.u);
	dev_dbg(dev, "VF %d TYPE %s\n", q, (mcode[grp].is_ae ? "AE" : "SE"));
	return mcode[grp].is_ae ? AE_TYPES : SE_TYPES;
}

/* Interrupt handler for mailbox messages from VFs */
static void cpt_handle_mbox_intr(struct cpt_device *cpt, int vf)
{
	struct cpt_vf_info *vfx = &cpt->vfinfo[vf];
	struct cpt_mbox mbx = {};
	int vftype;
	struct device *dev = &cpt->pdev->dev;

	/*
	 * MBOX[0] contains the message,
	 * MBOX[1] contains the data
	 */
	mbx.msg = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0));
	mbx.data = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1));
	dev_dbg(dev, "%s: Mailbox msg 0x%llx from VF%d\n", __func__, mbx.msg, vf);
	switch (mbx.msg) {
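	/* VF driver loaded: mark the VF up and pin this module */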
	case CPT_MSG_VF_UP:
		vfx->state = VF_STATE_UP;
		try_module_get(THIS_MODULE);
		cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
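	/* Probe handshake: reply with the VF's own id as data */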
	case CPT_MSG_READY:
		mbx.msg = CPT_MSG_READY;
		mbx.data = vf;
		cpt_send_msg_to_vf(cpt, vf, &mbx);
		break;
	case CPT_MSG_VF_DOWN:
		/* First msg in VF teardown sequence */
		vfx->state = VF_STATE_DOWN;
		module_put(THIS_MODULE);
		cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
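	/* VF requests a queue length: program it and ACK */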
	case CPT_MSG_QLEN:
		vfx->qlen = mbx.data;
		cpt_cfg_qlen_for_vf(cpt, vf, vfx->qlen);
		cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
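	/* Bind the VF's queue to an engine group; reply with the engine type */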
	case CPT_MSG_QBIND_GRP:
		vftype = cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
		if ((vftype != AE_TYPES) && (vftype != SE_TYPES)) {
			dev_err(dev, "Queue %d binding to group %llu failed\n",
				vf, mbx.data);
		} else {
			dev_dbg(dev, "Queue %d binding to group %llu successful\n",
				vf, mbx.data);
			mbx.msg = CPT_MSG_QBIND_GRP;
			mbx.data = vftype;
			cpt_send_msg_to_vf(cpt, vf, &mbx);
		}
		break;
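	/* VF requests a queue priority: program it and ACK */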
	case CPT_MSG_VQ_PRIORITY:
		vfx->priority = mbx.data;
		cpt_cfg_vq_priority(cpt, vf, vfx->priority);
		cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	default:
		dev_err(dev, "Invalid msg from VF%d, msg 0x%llx\n",
			vf, mbx.msg);
		break;
	}
}

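/*
 * cpt_mbox_intr_handler() - top-level PF mailbox interrupt handler
 * @cpt: CPT device
 * @mbx: mailbox interrupt set number (used only for the debug print)
 *
 * Walks the pending-interrupt register, services each VF whose bit is
 * set, then clears that VF's interrupt.
 */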
void cpt_mbox_intr_handler(struct cpt_device *cpt, int mbx)
{
	u64 intr;
	u8 vf;

	intr = cpt_read_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0));
	dev_dbg(&cpt->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
	for (vf = 0; vf < CPT_MAX_VF_NUM; vf++) {
		if (intr & (1ULL << vf)) {
			dev_dbg(&cpt->pdev->dev, "Intr from VF %d\n", vf);
			cpt_handle_mbox_intr(cpt, vf);
			cpt_clear_mbox_intr(cpt, vf);
		}
	}
}