/* nitrox_mbx.c - NITROX PF <-> VF mailbox message handling */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/bitmap.h>
  3. #include <linux/workqueue.h>
  4. #include "nitrox_csr.h"
  5. #include "nitrox_hal.h"
  6. #include "nitrox_dev.h"
  7. #include "nitrox_mbx.h"
/* Map a ring number to its owning VF, given the per-VF queue count. */
#define RING_TO_VFNO(_x, _y) ((_x) / (_y))

/**
 * mbx_msg_type - Mailbox message types
 */
enum mbx_msg_type {
	MBX_MSG_TYPE_NOP,	/* no operation / nothing to answer */
	MBX_MSG_TYPE_REQ,	/* request from VF, PF must reply */
	MBX_MSG_TYPE_ACK,	/* positive reply */
	MBX_MSG_TYPE_NACK,	/* negative reply */
};

/**
 * mbx_msg_opcode - Mailbox message opcodes
 *
 * Values are part of the PF<->VF wire protocol — do not renumber.
 */
enum mbx_msg_opcode {
	MSG_OP_VF_MODE = 1,	/* query device mode */
	MSG_OP_VF_UP,		/* VF coming up, carries its queue count */
	MSG_OP_VF_DOWN,		/* VF going down */
	MSG_OP_CHIPID_VFID,	/* query chip index and VF number */
	MSG_OP_MCODE_INFO = 11,	/* query microcode group layout */
};

/**
 * struct pf2vf_work - deferred context for answering one VF mailbox message
 * @vfdev: VF whose latched message is being processed
 * @ndev: PF device that received the mailbox interrupt
 * @pf2vf_resp: work item run on the pf2vf workqueue; the handler frees
 *              this whole structure when done
 */
struct pf2vf_work {
	struct nitrox_vfdev *vfdev;
	struct nitrox_device *ndev;
	struct work_struct pf2vf_resp;
};
  33. static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring)
  34. {
  35. u64 reg_addr;
  36. reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
  37. return nitrox_read_csr(ndev, reg_addr);
  38. }
  39. static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value,
  40. int ring)
  41. {
  42. u64 reg_addr;
  43. reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
  44. nitrox_write_csr(ndev, reg_addr, value);
  45. }
  46. static void pf2vf_send_response(struct nitrox_device *ndev,
  47. struct nitrox_vfdev *vfdev)
  48. {
  49. union mbox_msg msg;
  50. msg.value = vfdev->msg.value;
  51. switch (vfdev->msg.opcode) {
  52. case MSG_OP_VF_MODE:
  53. msg.data = ndev->mode;
  54. break;
  55. case MSG_OP_VF_UP:
  56. vfdev->nr_queues = vfdev->msg.data;
  57. atomic_set(&vfdev->state, __NDEV_READY);
  58. break;
  59. case MSG_OP_CHIPID_VFID:
  60. msg.id.chipid = ndev->idx;
  61. msg.id.vfid = vfdev->vfno;
  62. break;
  63. case MSG_OP_VF_DOWN:
  64. vfdev->nr_queues = 0;
  65. atomic_set(&vfdev->state, __NDEV_NOT_READY);
  66. break;
  67. case MSG_OP_MCODE_INFO:
  68. msg.data = 0;
  69. msg.mcode_info.count = 2;
  70. msg.mcode_info.info = MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5);
  71. msg.mcode_info.next_se_grp = 1;
  72. msg.mcode_info.next_ae_grp = 1;
  73. break;
  74. default:
  75. msg.type = MBX_MSG_TYPE_NOP;
  76. break;
  77. }
  78. if (msg.type == MBX_MSG_TYPE_NOP)
  79. return;
  80. /* send ACK to VF */
  81. msg.type = MBX_MSG_TYPE_ACK;
  82. pf2vf_write_mbox(ndev, msg.value, vfdev->ring);
  83. vfdev->msg.value = 0;
  84. atomic64_inc(&vfdev->mbx_resp);
  85. }
  86. static void pf2vf_resp_handler(struct work_struct *work)
  87. {
  88. struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
  89. pf2vf_resp);
  90. struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev;
  91. struct nitrox_device *ndev = pf2vf_resp->ndev;
  92. switch (vfdev->msg.type) {
  93. case MBX_MSG_TYPE_REQ:
  94. /* process the request from VF */
  95. pf2vf_send_response(ndev, vfdev);
  96. break;
  97. case MBX_MSG_TYPE_ACK:
  98. case MBX_MSG_TYPE_NACK:
  99. break;
  100. }
  101. kfree(pf2vf_resp);
  102. }
  103. void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
  104. {
  105. DECLARE_BITMAP(csr, BITS_PER_TYPE(u64));
  106. struct nitrox_vfdev *vfdev;
  107. struct pf2vf_work *pfwork;
  108. u64 value, reg_addr;
  109. u32 i;
  110. int vfno;
  111. /* loop for VF(0..63) */
  112. reg_addr = NPS_PKT_MBOX_INT_LO;
  113. value = nitrox_read_csr(ndev, reg_addr);
  114. bitmap_from_u64(csr, value);
  115. for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
  116. /* get the vfno from ring */
  117. vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
  118. vfdev = ndev->iov.vfdev + vfno;
  119. vfdev->ring = i;
  120. /* fill the vf mailbox data */
  121. vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
  122. pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
  123. if (!pfwork)
  124. continue;
  125. pfwork->vfdev = vfdev;
  126. pfwork->ndev = ndev;
  127. INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
  128. queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
  129. /* clear the corresponding vf bit */
  130. nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
  131. }
  132. /* loop for VF(64..127) */
  133. reg_addr = NPS_PKT_MBOX_INT_HI;
  134. value = nitrox_read_csr(ndev, reg_addr);
  135. bitmap_from_u64(csr, value);
  136. for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) {
  137. /* get the vfno from ring */
  138. vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
  139. vfdev = ndev->iov.vfdev + vfno;
  140. vfdev->ring = (i + 64);
  141. /* fill the vf mailbox data */
  142. vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
  143. pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
  144. if (!pfwork)
  145. continue;
  146. pfwork->vfdev = vfdev;
  147. pfwork->ndev = ndev;
  148. INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
  149. queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
  150. /* clear the corresponding vf bit */
  151. nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
  152. }
  153. }
  154. int nitrox_mbox_init(struct nitrox_device *ndev)
  155. {
  156. struct nitrox_vfdev *vfdev;
  157. int i;
  158. ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs,
  159. sizeof(struct nitrox_vfdev), GFP_KERNEL);
  160. if (!ndev->iov.vfdev)
  161. return -ENOMEM;
  162. for (i = 0; i < ndev->iov.num_vfs; i++) {
  163. vfdev = ndev->iov.vfdev + i;
  164. vfdev->vfno = i;
  165. }
  166. /* allocate pf2vf response workqueue */
  167. ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
  168. if (!ndev->iov.pf2vf_wq) {
  169. kfree(ndev->iov.vfdev);
  170. ndev->iov.vfdev = NULL;
  171. return -ENOMEM;
  172. }
  173. /* enable pf2vf mailbox interrupts */
  174. enable_pf2vf_mbox_interrupts(ndev);
  175. return 0;
  176. }
  177. void nitrox_mbox_cleanup(struct nitrox_device *ndev)
  178. {
  179. /* disable pf2vf mailbox interrupts */
  180. disable_pf2vf_mbox_interrupts(ndev);
  181. /* destroy workqueue */
  182. if (ndev->iov.pf2vf_wq)
  183. destroy_workqueue(ndev->iov.pf2vf_wq);
  184. kfree(ndev->iov.vfdev);
  185. ndev->iov.pf2vf_wq = NULL;
  186. ndev->iov.vfdev = NULL;
  187. }