/* bfa_hwcb.c -- Crossbow (CB) ASIC hardware abstraction callbacks */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  4. * Copyright (c) 2014- QLogic Corporation.
  5. * All rights reserved
  6. * www.qlogic.com
  7. *
  8. * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
  9. */
  10. #include "bfad_drv.h"
  11. #include "bfa_modules.h"
  12. #include "bfi_reg.h"
  13. void
  14. bfa_hwcb_reginit(struct bfa_s *bfa)
  15. {
  16. struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
  17. void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
  18. int fn = bfa_ioc_pcifn(&bfa->ioc);
  19. if (fn == 0) {
  20. bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
  21. bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK);
  22. } else {
  23. bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
  24. bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
  25. }
  26. }
  27. static void
  28. bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
  29. {
  30. writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
  31. bfa->iocfc.bfa_regs.intr_status);
  32. }
  33. /*
  34. * Actions to respond RME Interrupt for Crossbow ASIC:
  35. * - Write 1 to Interrupt Status register
  36. * INTX - done in bfa_intx()
  37. * MSIX - done in bfa_hwcb_rspq_ack_msix()
  38. * - Update CI (only if new CI)
  39. */
  40. static void
  41. bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
  42. {
  43. writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
  44. bfa->iocfc.bfa_regs.intr_status);
  45. if (bfa_rspq_ci(bfa, rspq) == ci)
  46. return;
  47. bfa_rspq_ci(bfa, rspq) = ci;
  48. writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
  49. }
  50. void
  51. bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
  52. {
  53. if (bfa_rspq_ci(bfa, rspq) == ci)
  54. return;
  55. bfa_rspq_ci(bfa, rspq) = ci;
  56. writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
  57. }
  58. void
  59. bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
  60. u32 *num_vecs, u32 *max_vec_bit)
  61. {
  62. #define __HFN_NUMINTS 13
  63. if (bfa_ioc_pcifn(&bfa->ioc) == 0) {
  64. *msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
  65. __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
  66. __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
  67. __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
  68. __HFN_INT_MBOX_LPU0);
  69. *max_vec_bit = __HFN_INT_MBOX_LPU0;
  70. } else {
  71. *msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
  72. __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
  73. __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
  74. __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
  75. __HFN_INT_MBOX_LPU1);
  76. *max_vec_bit = __HFN_INT_MBOX_LPU1;
  77. }
  78. *msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
  79. __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
  80. *num_vecs = __HFN_NUMINTS;
  81. }
  82. /*
  83. * Dummy interrupt handler for handling spurious interrupts.
  84. */
static void
bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec)
{
	/* Intentionally empty: installed for every vector by
	 * bfa_hwcb_msix_uninstall() so a spurious interrupt is ignored. */
}
  89. /*
  90. * No special setup required for crossbow -- vector assignments are implicit.
  91. */
  92. void
  93. bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
  94. {
  95. WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
  96. bfa->msix.nvecs = nvecs;
  97. bfa_hwcb_msix_uninstall(bfa);
  98. }
  99. void
  100. bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa)
  101. {
  102. int i;
  103. if (bfa->msix.nvecs == 0)
  104. return;
  105. if (bfa->msix.nvecs == 1) {
  106. for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++)
  107. bfa->msix.handler[i] = bfa_msix_all;
  108. return;
  109. }
  110. for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
  111. bfa->msix.handler[i] = bfa_msix_lpu_err;
  112. }
  113. void
  114. bfa_hwcb_msix_queue_install(struct bfa_s *bfa)
  115. {
  116. int i;
  117. if (bfa->msix.nvecs == 0)
  118. return;
  119. if (bfa->msix.nvecs == 1) {
  120. for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
  121. bfa->msix.handler[i] = bfa_msix_all;
  122. return;
  123. }
  124. for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
  125. bfa->msix.handler[i] = bfa_msix_reqq;
  126. for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
  127. bfa->msix.handler[i] = bfa_msix_rspq;
  128. }
  129. void
  130. bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
  131. {
  132. int i;
  133. for (i = 0; i < BFI_MSIX_CB_MAX; i++)
  134. bfa->msix.handler[i] = bfa_hwcb_msix_dummy;
  135. }
  136. /*
  137. * No special enable/disable -- vector assignments are implicit.
  138. */
  139. void
  140. bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
  141. {
  142. if (msix) {
  143. bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
  144. bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
  145. } else {
  146. bfa->iocfc.hwif.hw_reqq_ack = NULL;
  147. bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
  148. }
  149. }
/*
 * Return the inclusive [start, end] MSIX vector range occupied by the
 * RME (response) queues on the Crossbow ASIC.
 */
void
bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
{
*start = BFI_MSIX_RME_QMIN_CB;
*end = BFI_MSIX_RME_QMAX_CB;
}