qed*: Utilize Firmware 8.15.3.0
This patch advances the qed* drivers to using the newer firmware -
this solves several firmware bugs, mostly related [but not limited] to
init/deinit issues in the various offloaded protocols.

It also introduces a major 4-Cached SGE change in firmware, which can
be seen in the storage drivers' changes.

In addition, this firmware is required for supporting the new QL41xxx
series of adapters; while this patch doesn't add the actual support,
the firmware contains the necessary initialization & firmware logic to
operate such adapters [actual support would be added later on].

Changes from previous versions:
-------------------------------
 - V2 - fix kbuild-test robot warnings

Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Manish Rangankar <Manish.Rangankar@cavium.com>
Signed-off-by: Chad Dupuis <Chad.Dupuis@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit be086e7c53 (parent 6a019c5c50), committed by David S. Miller
@@ -51,7 +51,7 @@
 #include "qed_hsi.h"
 
 extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.20"
+#define DRV_MODULE_VERSION "8.10.10.21"
 
 #define MAX_HWFNS_PER_DEVICE (4)
 #define NAME_SIZE 16
@@ -71,8 +71,7 @@
 #define TM_ALIGN BIT(TM_SHIFT)
 #define TM_ELEM_SIZE 4
 
-/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
+#define ILT_DEFAULT_HW_P_SIZE 4
 
 #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
 #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -1126,7 +1125,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
 	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
 	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
 	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
-	/* default ILT page size for all clients is 32K */
+	/* default ILT page size for all clients is 64K */
 	for (i = 0; i < ILT_CLI_MAX; i++)
 		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
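As an aside (not part of the commit): the effect of the ILT default page-size change above can be checked with the two macros visible in this hunk. Raising the default hw_p_size from 3 to 4 moves the default ILT page from 32K to 64K, which is exactly what the updated comment states. A minimal sketch, assuming only the macro definitions shown in the diff:

#include <stdio.h>

/* Macros copied from the hunk above; the old default was
 * (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3), the new default is 4.
 */
#define ILT_DEFAULT_HW_P_SIZE 4
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))

int main(void)
{
	/* 3 -> 32768 bytes (32K), 4 -> 65536 bytes (64K) */
	printf("old default (3): %u bytes\n", ILT_PAGE_IN_BYTES(3));
	printf("new default (%d): %u bytes\n", ILT_DEFAULT_HW_P_SIZE,
	       ILT_PAGE_IN_BYTES(ILT_DEFAULT_HW_P_SIZE));
	return 0;
}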
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
 {
 	u32 qm_line_crd;
 
-	/* In A0 - Limit the size of pbf queue so that only 511 commands with
-	 * the minimum size of 4 (FCoE minimum size)
-	 */
-	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
-
-	if (is_bb_a0)
-		cmdq_lines = min_t(u32, cmdq_lines, 1022);
 	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
 	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
 			 (u32)cmdq_lines);
@@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
 	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
 	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
 			    QM_PF_QUEUE_GROUP_SIZE;
-	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
 	u16 i, pq_id, pq_group;
 
 	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
 	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
-	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
-	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
 	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
 	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
 	u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
 		bool is_vf_pq = (i >= p_params->num_pf_pqs);
 		struct qm_rf_pq_map tx_pq_map;
 
+		bool rl_valid = p_params->pq_params[i].rl_valid &&
+				(p_params->pq_params[i].vport_id <
+				 MAX_QM_GLOBAL_RLS);
+
 		/* update first Tx PQ of VPORT/TC */
 		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
 				    p_params->start_vport;
@@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
 				     (p_params->pf_id <<
 				      QM_WFQ_VP_PQ_PF_SHIFT));
 		}
 
+		if (p_params->pq_params[i].rl_valid && !rl_valid)
+			DP_NOTICE(p_hwfn,
+				  "Invalid VPORT ID for rate limiter configuration");
 		/* fill PQ map entry */
 		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
-			  p_params->pq_params[i].rl_valid ? 1 : 0);
+		SET_FIELD(tx_pq_map.reg,
+			  QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-			  p_params->pq_params[i].rl_valid ?
+			  rl_valid ?
 			  p_params->pq_params[i].vport_id : 0);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
 			/* if PQ is associated with a VF, add indication
 			 * to PQ VF mask
 			 */
-			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
-				(1 << (pq_id % tx_pq_vf_mask_width));
+			tx_pq_vf_mask[pq_id /
+				      QM_PF_QUEUE_GROUP_SIZE] |=
+				      BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
 			mem_addr_4kb += vport_pq_mem_4kb;
 		} else {
 			mem_addr_4kb += pq_mem_4kb;
@@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 	if (p_params->pf_id < MAX_NUM_PFS_BB)
 		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
 	else
-		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
-				 (p_params->pf_id % MAX_NUM_PFS_BB);
+		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
+	crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
 
 	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
 	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 			  QM_WFQ_CRD_REG_SIGN_BIT);
 	}
 
-	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
-		     inc_val);
 	STORE_RT_REG(p_hwfn,
 		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
 		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+		     inc_val);
 
 	return 0;
 }
@@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 {
 	u8 i, vport_id;
 
+	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+		DP_NOTICE(p_hwfn,
+			  "Invalid VPORT ID for rate limiter configuration");
+		return -1;
+	}
+
 	/* go over all PF VPORTs */
 	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
 		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
 {
 	u32 inc_val = QM_RL_INC_VAL(vport_rl);
 
+	if (vport_id >= MAX_QM_GLOBAL_RLS) {
+		DP_NOTICE(p_hwfn,
+			  "Invalid VPORT ID for rate limiter configuration");
+		return -1;
+	}
+
 	if (inc_val > QM_RL_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
 		return -1;
@@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 	       eth_geneve_enable ? 1 : 0);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-	/* comp ver */
-	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
-	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
-	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
-	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
 	/* EDPM with geneve tunnel not supported in BB_B0 */
 	if (QED_IS_BB_B0(p_hwfn->cdev))
 		return;
@@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
 	}
 
 	/* First Dword contains metadata and should be skipped */
-	buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+	buf_hdr = (struct bin_buffer_hdr *)data;
 
 	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
 	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
@@ -594,7 +594,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
 	u8 bd_flags = 0;
 
 	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
 
 	return bd_flags;
 }
@@ -755,8 +755,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
 			   p_buffer->placement_offset;
 		parse_flags = p_buffer->parse_flags;
 		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
 
 		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
 					       p_buffer->vlan, bd_flags,
@@ -1588,33 +1588,34 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
 	p_tx->cur_send_frag_num++;
 }
 
-static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
-					     struct qed_ll2_info *p_ll2,
-					     struct qed_ll2_tx_packet *p_curp,
-					     u8 num_of_bds,
-					     enum core_tx_dest tx_dest,
-					     u16 vlan,
-					     u8 bd_flags,
-					     u16 l4_hdr_offset_w,
-					     enum core_roce_flavor_type type,
-					     dma_addr_t first_frag,
-					     u16 first_frag_len)
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+				 struct qed_ll2_info *p_ll2,
+				 struct qed_ll2_tx_packet *p_curp,
+				 u8 num_of_bds,
+				 enum core_tx_dest tx_dest,
+				 u16 vlan,
+				 u8 bd_flags,
+				 u16 l4_hdr_offset_w,
+				 enum core_roce_flavor_type roce_flavor,
+				 dma_addr_t first_frag,
+				 u16 first_frag_len)
 {
 	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
 	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
 	struct core_tx_bd *start_bd = NULL;
-	u16 frag_idx;
+	u16 bd_data = 0, frag_idx;
 
 	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
 	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
 		  cpu_to_le16(l4_hdr_offset_w));
 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
-	start_bd->bd_flags.as_bitfield = bd_flags;
-	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
-	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
-	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
-	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
+	bd_data |= bd_flags;
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
 	DMA_REGPAIR_LE(start_bd->addr, first_frag);
 	start_bd->nbytes = cpu_to_le16(first_frag_len);
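As an illustration only (not part of the commit): the hunk above replaces the old 8-bit bd_flags bitfield with a 16-bit bd_data word that is built with SET_FIELD and then stored little-endian. The sketch below uses a simplified mask/shift helper and a made-up field layout purely to show that pattern; the real field masks and shifts live in the firmware HSI headers.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the driver's SET_FIELD(value, name, val) macro:
 * clear the field, then OR in the new value at the field's shift.
 * The field layout below is invented for the example.
 */
#define EX_START_BD_MASK  0x1
#define EX_START_BD_SHIFT 0
#define EX_NBDS_MASK      0xf
#define EX_NBDS_SHIFT     1

#define EX_SET_FIELD(value, name, val)                          \
	do {                                                    \
		(value) &= ~((name##_MASK) << (name##_SHIFT));  \
		(value) |= ((uint16_t)(val)) << (name##_SHIFT); \
	} while (0)

int main(void)
{
	uint16_t bd_data = 0;

	EX_SET_FIELD(bd_data, EX_START_BD, 0x1); /* mark the start BD */
	EX_SET_FIELD(bd_data, EX_NBDS, 3);       /* 3 BDs in the packet */
	printf("bd_data = 0x%04x\n", bd_data);   /* prints 0x0007 */
	return 0;
}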
@@ -1639,9 +1640,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
 
 		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
-		(*p_bd)->bd_flags.as_bitfield = 0;
+		(*p_bd)->bd_data.as_bitfield = 0;
 		(*p_bd)->bitfield1 = 0;
-		(*p_bd)->bitfield0 = 0;
 		p_curp->bds_set[frag_idx].tx_frag = 0;
 		p_curp->bds_set[frag_idx].frag_len = 0;
 	}
@@ -2238,11 +2238,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
 	/* Request HW to calculate IP csum */
 	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
 	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
-		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
 
 	if (skb_vlan_tag_present(skb)) {
 		vlan = skb_vlan_tag_get(skb);
-		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
 	}
 
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
@@ -356,6 +356,10 @@
 	0x238804UL
 #define RDIF_REG_STOP_ON_ERROR \
 	0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+	0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+	64
 #define SRC_REG_SOFT_RST \
 	0x23874cUL
 #define TCFC_REG_ACTIVITY_COUNTER \
@@ -370,6 +374,10 @@
 	0x1700004UL
 #define TDIF_REG_STOP_ON_ERROR \
 	0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+	0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+	64
 #define UCM_REG_INIT \
 	0x1280000UL
 #define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
@@ -1236,6 +1244,26 @@
 	0x1901534UL
 #define USEM_REG_DBG_FORCE_FRAME \
 	0x1901538UL
+#define NWS_REG_DBG_SELECT \
+	0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE \
+	0x70012cUL
+#define NWS_REG_DBG_SHIFT \
+	0x700130UL
+#define NWS_REG_DBG_FORCE_VALID \
+	0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME \
+	0x700138UL
+#define MS_REG_DBG_SELECT \
+	0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE \
+	0x6a022cUL
+#define MS_REG_DBG_SHIFT \
+	0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID \
+	0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME \
+	0x6a0238UL
 #define PCIE_REG_DBG_COMMON_SELECT \
 	0x054398UL
 #define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
@@ -1448,6 +1476,8 @@
 	0x000b48UL
 #define RSS_REG_RSS_RAM_DATA \
 	0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+	4
 #define MISC_REG_BLOCK_256B_EN \
 	0x008c14UL
 #define NWS_REG_NWS_CMU \
@@ -66,13 +66,27 @@
 #include "qed_roce.h"
 #include "qed_ll2.h"
 
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
-			  struct event_ring_entry *p_eqe)
-{
-	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
 
-	p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
-					     p_eqe->opcode, &p_eqe->data);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+			  u8 fw_event_code, union rdma_eqe_data *rdma_data)
+{
+	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+		u16 icid =
+		    (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+
+		/* icid release in this async event can occur only if the icid
+		 * was offloaded to the FW. In case it wasn't offloaded this is
+		 * handled in qed_roce_sp_destroy_qp.
+		 */
+		qed_roce_free_real_icid(p_hwfn, icid);
+	} else {
+		struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
+
+		events->affiliated_event(p_hwfn->p_rdma_info->events.context,
+					 fw_event_code,
+					 &rdma_data->async_handle);
+	}
 }
 
 static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
@@ -113,6 +127,15 @@ static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+			    struct qed_bmap *bmap, u32 id_num)
+{
+	if (id_num >= bmap->max_count)
+		return;
+
+	__set_bit(id_num, bmap->bitmap);
+}
+
 static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
 				struct qed_bmap *bmap, u32 id_num)
 {
@@ -129,6 +152,15 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
 	}
 }
 
+static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+			    struct qed_bmap *bmap, u32 id_num)
+{
+	if (id_num >= bmap->max_count)
+		return -1;
+
+	return test_bit(id_num, bmap->bitmap);
+}
+
 static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
 {
 	/* First sb id for RoCE is after all the l2 sb */
@@ -170,7 +202,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	/* Queue zone lines are shared between RoCE and L2 in such a way that
 	 * they can be used by each without obstructing the other.
 	 */
-	p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
 
 	/* Allocate a struct with device params and fill it */
 	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
@@ -248,9 +281,18 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 		goto free_tid_map;
 	}
 
+	/* Allocate bitmap for cids used for responders/requesters. */
+	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons);
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
+		goto free_cid_map;
+	}
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
 	return 0;
 
+free_cid_map:
+	kfree(p_rdma_info->cid_map.bitmap);
 free_tid_map:
 	kfree(p_rdma_info->tid_map.bitmap);
 free_toggle_map:
@@ -273,7 +315,22 @@ free_rdma_info:
 
 static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 {
+	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
 	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+	int wait_count = 0;
+
+	/* when destroying a_RoCE QP the control is returned to the user after
+	 * the synchronous part. The asynchronous part may take a little longer.
+	 * We delay for a short while if an async destroy QP is still expected.
+	 * Beyond the added delay we clear the bitmap anyway.
+	 */
+	while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+		msleep(100);
+		if (wait_count++ > 20) {
+			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+			break;
+		}
+	}
 
 	kfree(p_rdma_info->cid_map.bitmap);
 	kfree(p_rdma_info->tid_map.bitmap);
@@ -724,6 +781,14 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
 	u32 addr;
 
 	p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+		DP_NOTICE(p_hwfn,
+			  "queue zone offset %d is too large (max is %d)\n",
+			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+		return;
+	}
+
 	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
 	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
@@ -1080,6 +1145,14 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 	return flavor;
 }
 
+void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+{
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
 static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
 {
 	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -1139,6 +1212,13 @@ err:
 	return rc;
 }
 
+static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
 static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 					struct qed_rdma_qp *qp)
 {
@@ -1147,7 +1227,8 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 	union qed_qm_pq_params qm_params;
 	enum roce_flavor roce_flavor;
 	struct qed_spq_entry *p_ent;
-	u16 physical_queue0 = 0;
+	u16 regular_latency_queue;
+	enum protocol_type proto;
 	int rc;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1229,15 +1310,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
 	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
 	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
 	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
 				       qp->rq_cq_id);
 
 	memset(&qm_params, 0, sizeof(qm_params));
 	qm_params.roce.qpid = qp->icid >> 1;
-	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+	regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
+					      &qm_params);
 
-	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+	p_ramrod->regular_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
+	p_ramrod->low_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
 	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 
 	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1253,13 +1338,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
-		   rc, physical_queue0);
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "rc = %d regular physical queue = 0x%x\n", rc,
+		   regular_latency_queue);
 
 	if (rc)
 		goto err;
 
 	qp->resp_offloaded = true;
+	qp->cq_prod = 0;
+
+	proto = p_hwfn->p_rdma_info->proto;
+	qed_roce_set_real_cid(p_hwfn, qp->icid -
+			      qed_cxt_get_proto_cid_start(p_hwfn, proto));
 
 	return rc;
@@ -1280,7 +1371,8 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 	union qed_qm_pq_params qm_params;
 	enum roce_flavor roce_flavor;
 	struct qed_spq_entry *p_ent;
-	u16 physical_queue0 = 0;
+	u16 regular_latency_queue;
+	enum protocol_type proto;
 	int rc;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1351,15 +1443,19 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
 	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
 	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
-	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
-				       qp->sq_cq_id);
+	p_ramrod->cq_cid =
+	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 
 	memset(&qm_params, 0, sizeof(qm_params));
 	qm_params.roce.qpid = qp->icid >> 1;
-	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+	regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
+					      &qm_params);
 
-	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
+	p_ramrod->regular_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
+	p_ramrod->low_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
 	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 
 	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1378,6 +1474,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 		goto err;
 
 	qp->req_offloaded = true;
+	proto = p_hwfn->p_rdma_info->proto;
+	qed_roce_set_real_cid(p_hwfn,
+			      qp->icid + 1 -
+			      qed_cxt_get_proto_cid_start(p_hwfn, proto));
 
 	return rc;
@@ -1577,7 +1677,8 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
 
 static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 					    struct qed_rdma_qp *qp,
-					    u32 *num_invalidated_mw)
+					    u32 *num_invalidated_mw,
+					    u32 *cq_prod)
 {
 	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
 	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
@@ -1588,8 +1689,22 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 
-	if (!qp->resp_offloaded)
+	*num_invalidated_mw = 0;
+	*cq_prod = qp->cq_prod;
+
+	if (!qp->resp_offloaded) {
+		/* If a responder was never offload, we need to free the cids
+		 * allocated in create_qp as a FW async event will never arrive
+		 */
+		u32 cid;
+
+		cid = qp->icid -
+		      qed_cxt_get_proto_cid_start(p_hwfn,
+						  p_hwfn->p_rdma_info->proto);
+		qed_roce_free_cid_pair(p_hwfn, (u16)cid);
+
 		return 0;
+	}
 
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
@@ -1624,6 +1739,8 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 		goto err;
 
 	*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+	qp->cq_prod = *cq_prod;
 
 	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
 	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -1827,10 +1944,8 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
 
 	out_params->draining = false;
 
-	if (rq_err_state)
+	if (rq_err_state || sq_err_state)
 		qp->cur_state = QED_ROCE_QP_STATE_ERR;
-	else if (sq_err_state)
-		qp->cur_state = QED_ROCE_QP_STATE_SQE;
 	else if (sq_draining)
 		out_params->draining = true;
 	out_params->state = qp->cur_state;
@@ -1849,10 +1964,9 @@ err_resp:
 
 static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 {
-	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
 	u32 num_invalidated_mw = 0;
 	u32 num_bound_mw = 0;
-	u32 start_cid;
+	u32 cq_prod;
 	int rc;
 
 	/* Destroys the specified QP */
@@ -1866,7 +1980,8 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 
 	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
 		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-						      &num_invalidated_mw);
+						      &num_invalidated_mw,
+						      &cq_prod);
 		if (rc)
 			return rc;
||||
@@ -1881,21 +1996,6 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
|
||||
"number of invalidate memory windows is different from bounded ones\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_bh(&p_rdma_info->lock);
|
||||
|
||||
start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
|
||||
p_rdma_info->proto);
|
||||
|
||||
/* Release responder's icid */
|
||||
qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
|
||||
qp->icid - start_cid);
|
||||
|
||||
/* Release requester's icid */
|
||||
qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
|
||||
qp->icid + 1 - start_cid);
|
||||
|
||||
spin_unlock_bh(&p_rdma_info->lock);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -2110,12 +2210,19 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 		return rc;
 	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
 		/* Any state -> RESET */
+		u32 cq_prod;
+
+		/* Send destroy responder ramrod */
+		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+						      qp,
+						      &num_invalidated_mw,
+						      &cq_prod);
 
-		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-						      &num_invalidated_mw);
 		if (rc)
 			return rc;
 
+		qp->cq_prod = cq_prod;
+
 		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
 						      &num_bound_mw);
@@ -2454,6 +2561,31 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
 	return rc;
 }
 
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
+{
+	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+	u32 start_cid, cid, xcid;
+
+	/* an even icid belongs to a responder while an odd icid belongs to a
+	 * requester. The 'cid' received as an input can be either. We calculate
+	 * the "partner" icid and call it xcid. Only if both are free then the
+	 * "cid" map can be cleared.
+	 */
+	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
+	cid = icid - start_cid;
+	xcid = cid ^ 1;
+
+	spin_lock_bh(&p_rdma_info->lock);
+
+	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
+	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
+		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
+		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
+	}
+
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
 static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
 {
 	return QED_LEADING_HWFN(cdev);
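As a side note (not part of the commit): the pairing logic in qed_roce_free_real_icid above relies on responder/requester icids being allocated as an even/odd pair, so the partner of any cid is found by flipping its lowest bit, as the comment in the hunk explains. A minimal sketch of just that pairing rule, assuming nothing beyond what the comment states:

#include <stdio.h>

/* Partner ("xcid") of a cid within a responder/requester pair:
 * an even cid (responder) pairs with cid + 1, an odd cid (requester)
 * pairs with cid - 1 -- both cases are simply cid ^ 1.
 */
static unsigned int partner_cid(unsigned int cid)
{
	return cid ^ 1;
}

int main(void)
{
	printf("partner of 6 (responder) = %u\n", partner_cid(6)); /* 7 */
	printf("partner of 7 (requester) = %u\n", partner_cid(7)); /* 6 */
	return 0;
}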
@@ -2773,7 +2905,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
 		       : QED_LL2_RROCE;
 
 	if (pkt->roce_mode == ROCE_V2_IPV4)
-		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
 
 	/* Tx header */
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
@@ -82,6 +82,7 @@ struct qed_rdma_info {
 	struct qed_bmap qp_map;
 	struct qed_bmap srq_map;
 	struct qed_bmap cid_map;
+	struct qed_bmap real_cid_map;
 	struct qed_bmap dpi_map;
 	struct qed_bmap toggle_bits;
 	struct qed_rdma_events events;
@@ -92,6 +93,7 @@ struct qed_rdma_info {
 	u32 num_qps;
 	u32 num_mrs;
 	u16 queue_zone_base;
+	u16 max_queue_zones;
 	enum protocol_type proto;
 };
 
@@ -153,6 +155,7 @@ struct qed_rdma_qp {
 	dma_addr_t irq_phys_addr;
 	u8 irq_num_pages;
 	bool resp_offloaded;
+	u32 cq_prod;
 
 	u8 remote_mac_addr[6];
 	u8 local_mac_addr[6];
@@ -163,8 +166,8 @@ struct qed_rdma_qp {
 
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
-			  struct event_ring_entry *p_eqe);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+			  u8 fw_event_code, union rdma_eqe_data *rdma_data);
 void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
 				     u8 connection_handle,
 				     void *cookie,
@@ -187,7 +190,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
 				     u16 src_mac_addr_lo, bool b_last_packet);
 #else
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+					u8 fw_event_code,
+					union rdma_eqe_data *rdma_data) {}
 static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
 						   u8 connection_handle,
 						   void *cookie,
@@ -296,9 +296,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
 			  struct event_ring_entry *p_eqe)
 {
 	switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_QED_RDMA)
 	case PROTOCOLID_ROCE:
-		qed_async_roce_event(p_hwfn, p_eqe);
+		qed_roce_async_event(p_hwfn, p_eqe->opcode,
+				     &p_eqe->data.rdma_data);
 		return 0;
+#endif
 	case PROTOCOLID_COMMON:
 		return qed_sriov_eqe_event(p_hwfn,
 					   p_eqe->opcode,
@@ -306,14 +309,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
 	case PROTOCOLID_ISCSI:
 		if (!IS_ENABLED(CONFIG_QED_ISCSI))
 			return -EINVAL;
-		if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
-			u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
-
-			qed_ooo_release_connection_isles(p_hwfn,
-							 p_hwfn->p_ooo_info,
-							 cid);
-			return 0;
-		}
 
 		if (p_hwfn->p_iscsi_info->event_cb) {
 			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
@@ -50,7 +50,7 @@
 #define QEDE_MAJOR_VERSION 8
 #define QEDE_MINOR_VERSION 10
 #define QEDE_REVISION_VERSION 10
-#define QEDE_ENGINEERING_VERSION 20
+#define QEDE_ENGINEERING_VERSION 21
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
 	__stringify(QEDE_MINOR_VERSION) "." \
 	__stringify(QEDE_REVISION_VERSION) "." \