  1. // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
  2. /* Copyright (c) 2017 - 2021 Intel Corporation */
  3. #include "osdep.h"
  4. #include "type.h"
  5. #include "icrdma_hw.h"
/*
 * Register offsets for the PF, copied into dev->hw_regs[] by
 * icrdma_init_hw(). Order must line up with the irdma register index
 * enum (IRDMA_GLINT_DYN_CTL, IRDMA_WQEALLOC, ... — see users below).
 * NOTE(review): the ICRDMA_DB_ADDR_OFFSET entry is a raw doorbell
 * offset, not a PF register — icrdma_init_hw() stores it with a NULL
 * base instead of hw_addr.
 */
static u32 icrdma_regs[IRDMA_MAX_REGS] = {
	PFPE_CQPTAIL,
	PFPE_CQPDB,
	PFPE_CCQPSTATUS,
	PFPE_CCQPHIGH,
	PFPE_CCQPLOW,
	PFPE_CQARM,
	PFPE_CQACK,
	PFPE_AEQALLOC,
	PFPE_CQPERRCODES,
	PFPE_WQEALLOC,
	GLINT_DYN_CTL(0),
	ICRDMA_DB_ADDR_OFFSET,
	GLPCI_LBARCTRL,
	GLPE_CPUSTATUS0,
	GLPE_CPUSTATUS1,
	GLPE_CPUSTATUS2,
	PFINT_AEQCTL,
	GLINT_CEQCTL(0),
	VSIQF_PE_CTL1(0),
	PFHMC_PDINV,
	GLHMC_VFPDINV(0),
	GLPE_CRITERR,
	GLINT_RATE(0),
};
/*
 * Hardware field masks, copied into dev->hw_masks[] by icrdma_init_hw().
 * Order must line up with the irdma mask index enum and with the
 * corresponding entries in icrdma_shifts[].
 */
static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
	ICRDMA_CCQPSTATUS_CCQP_DONE,
	ICRDMA_CCQPSTATUS_CCQP_ERR,
	ICRDMA_CQPSQ_STAG_PDID,
	ICRDMA_CQPSQ_CQ_CEQID,
	ICRDMA_CQPSQ_CQ_CQID,
	ICRDMA_COMMIT_FPM_CQCNT,
};
/*
 * Bit shifts for the fields in icrdma_masks[], copied into
 * dev->hw_shifts[] by icrdma_init_hw(). Entry order must match
 * icrdma_masks[] one-for-one (same index enum).
 */
static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
	ICRDMA_CCQPSTATUS_CCQP_DONE_S,
	ICRDMA_CCQPSTATUS_CCQP_ERR_S,
	ICRDMA_CQPSQ_STAG_PDID_S,
	ICRDMA_CQPSQ_CQ_CEQID_S,
	ICRDMA_CQPSQ_CQ_CQID_S,
	ICRDMA_COMMIT_FPM_CQCNT_S,
};
  47. /**
  48. * icrdma_ena_irq - Enable interrupt
  49. * @dev: pointer to the device structure
  50. * @idx: vector index
  51. */
  52. static void icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
  53. {
  54. u32 val;
  55. u32 interval = 0;
  56. if (dev->ceq_itr && dev->aeq->msix_idx != idx)
  57. interval = dev->ceq_itr >> 1; /* 2 usec units */
  58. val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0) |
  59. FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
  60. FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
  61. FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);
  62. if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
  63. writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
  64. else
  65. writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
  66. }
  67. /**
  68. * icrdma_disable_irq - Disable interrupt
  69. * @dev: pointer to the device structure
  70. * @idx: vector index
  71. */
  72. static void icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
  73. {
  74. if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
  75. writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
  76. else
  77. writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
  78. }
  79. /**
  80. * icrdma_cfg_ceq- Configure CEQ interrupt
  81. * @dev: pointer to the device structure
  82. * @ceq_id: Completion Event Queue ID
  83. * @idx: vector index
  84. * @enable: True to enable, False disables
  85. */
  86. static void icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
  87. bool enable)
  88. {
  89. u32 reg_val;
  90. reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
  91. FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
  92. FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 3);
  93. writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
  94. }
/* Interrupt ops installed on the device by icrdma_init_hw(); the AEQ
 * configure hook uses the shared irdma_cfg_aeq() defined elsewhere.
 */
static const struct irdma_irq_ops icrdma_irq_ops = {
	.irdma_cfg_aeq = irdma_cfg_aeq,
	.irdma_cfg_ceq = icrdma_cfg_ceq,
	.irdma_dis_irq = icrdma_disable_irq,
	.irdma_en_irq = icrdma_ena_irq,
};
  101. void icrdma_init_hw(struct irdma_sc_dev *dev)
  102. {
  103. int i;
  104. u8 __iomem *hw_addr;
  105. for (i = 0; i < IRDMA_MAX_REGS; ++i) {
  106. hw_addr = dev->hw->hw_addr;
  107. if (i == IRDMA_DB_ADDR_OFFSET)
  108. hw_addr = NULL;
  109. dev->hw_regs[i] = (u32 __iomem *)(hw_addr + icrdma_regs[i]);
  110. }
  111. dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
  112. dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;
  113. for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
  114. dev->hw_shifts[i] = icrdma_shifts[i];
  115. for (i = 0; i < IRDMA_MAX_MASKS; ++i)
  116. dev->hw_masks[i] = icrdma_masks[i];
  117. dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
  118. dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
  119. dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
  120. dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
  121. dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
  122. dev->irq_ops = &icrdma_irq_ops;
  123. dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
  124. dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
  125. dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
  126. dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
  127. dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
  128. dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
  129. IRDMA_FEATURE_CQ_RESIZE;
  130. }