Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Mostly simple overlapping changes. For example, David Ahern's adjacency
list revamp in 'net-next' conflicted with an adjacency list traversal
bug fix in 'net'.

Signed-off-by: David S. Miller <davem@davemloft.net>
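A pattern that recurs throughout the qed/qede hunks below is trading an #if block keyed on CONFIG_INFINIBAND_QEDR for a single expression keyed on IS_ENABLED(CONFIG_QED_RDMA), which folds to a constant while keeping both branches visible to the compiler. The following is a minimal, self-contained sketch of that idiom; the simplified IS_ENABLED() macro and the CONFIG_QED_RDMA_ENABLED switch are illustrative stand-ins for the kernel's Kconfig plumbing, and only the ILT page-size values 4 and 3 come from the qed_cxt.h hunk further down.

/* Hedged sketch of the IS_ENABLED() idiom used in this resolution. */
#include <stdio.h>

#define CONFIG_QED_RDMA_ENABLED 1	/* pretend the option is built in */
#define IS_ENABLED(option)	(option)	/* the kernel's macro is fancier */

/* Old style: the value exists in only one preprocessor branch. */
#if IS_ENABLED(CONFIG_QED_RDMA_ENABLED)
#define ILT_DEFAULT_HW_P_SIZE_OLD 4
#else
#define ILT_DEFAULT_HW_P_SIZE_OLD 3
#endif

/* New style (as in the qed_cxt.h hunk): one definition; the condition
 * folds at compile time, so the unused value is still dropped but the
 * code is always parsed and type-checked regardless of the RDMA config.
 */
#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA_ENABLED) ? 4 : 3)

int main(void)
{
	printf("old=%d new=%d\n",
	       ILT_DEFAULT_HW_P_SIZE_OLD, ILT_DEFAULT_HW_P_SIZE);
	return 0;
}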
@@ -107,15 +107,7 @@ config QEDE
	---help---
	  This enables the support for ...

config INFINIBAND_QEDR
	tristate "QLogic qede RoCE sources [debug]"
	depends on QEDE && 64BIT
	select QED_LL2
	default n
	---help---
	  This provides a temporary node that allows the compilation
	  and logical testing of the InfiniBand over Ethernet support
	  for QLogic QED. This would be replaced by the 'real' option
	  once the QEDR driver is added [+relocated].
config QED_RDMA
	bool

endif # NET_VENDOR_QLOGIC
@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
	 qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o
@@ -47,13 +47,8 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4

/* ILT constants */
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
#define ILT_DEFAULT_HW_P_SIZE 4
#else
#define ILT_DEFAULT_HW_P_SIZE 3
#endif
#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)

#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
	return NULL;
}

void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	p_mgr->srq_count = num_srqs;
}

u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

@@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
	return 0;
}

void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
			    struct qed_rdma_pf_params *p_params)
static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
				   struct qed_rdma_pf_params *p_params)
{
	u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
	enum protocol_type proto;
@@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
	if (!dcbx_info)
		return -ENOMEM;

	memset(dcbx_info, 0, sizeof(*dcbx_info));
	rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
	if (rc) {
		kfree(dcbx_info);
@@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
	if (!dcbx_info)
		return NULL;

	memset(dcbx_info, 0, sizeof(*dcbx_info));
	if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
		kfree(dcbx_info);
		return NULL;
@@ -405,7 +405,7 @@ struct phy_defs {
|
||||
/***************************** Constant Arrays *******************************/
|
||||
|
||||
/* Debug arrays */
|
||||
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
|
||||
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
|
||||
|
||||
/* Chip constant definitions array */
|
||||
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
|
||||
@@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
|
||||
}
|
||||
|
||||
/* Dump MCP Trace */
|
||||
enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
u32 *dump_buf,
|
||||
bool dump, u32 *num_dumped_dwords)
|
||||
static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
u32 *dump_buf,
|
||||
bool dump, u32 *num_dumped_dwords)
|
||||
{
|
||||
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
|
||||
u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
|
||||
@@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
|
||||
}
|
||||
|
||||
/* Dump GRC FIFO */
|
||||
enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
u32 *dump_buf,
|
||||
bool dump, u32 *num_dumped_dwords)
|
||||
static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
u32 *dump_buf,
|
||||
bool dump, u32 *num_dumped_dwords)
|
||||
{
|
||||
u32 offset = 0, dwords_read, size_param_offset;
|
||||
bool fifo_has_data;
|
||||
@@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
|
||||
}
|
||||
|
||||
/* Dump IGU FIFO */
|
||||
enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
u32 *dump_buf,
|
||||
bool dump, u32 *num_dumped_dwords)
|
||||
static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
u32 *dump_buf,
|
||||
bool dump, u32 *num_dumped_dwords)
|
||||
{
|
||||
u32 offset = 0, dwords_read, size_param_offset;
|
||||
bool fifo_has_data;
|
||||
@@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
|
||||
}
|
||||
|
||||
/* Protection Override dump */
|
||||
enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
u32 *dump_buf,
|
||||
bool dump, u32 *num_dumped_dwords)
|
||||
static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
u32 *dump_buf,
|
||||
bool dump,
|
||||
u32 *num_dumped_dwords)
|
||||
{
|
||||
u32 offset = 0, size_param_offset, override_window_dwords;
|
||||
|
||||
@@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
|
||||
}
|
||||
|
||||
/* Wrapper for unifying the idle_chk and mcp_trace api */
|
||||
enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
|
||||
u32 *dump_buf,
|
||||
u32 num_dumped_dwords,
|
||||
char *results_buf)
|
||||
static enum dbg_status
|
||||
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
|
||||
u32 *dump_buf,
|
||||
u32 num_dumped_dwords,
|
||||
char *results_buf)
|
||||
{
|
||||
u32 num_errors, num_warnnings;
|
||||
|
||||
@@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
|
||||
|
||||
#define QED_RESULTS_BUF_MIN_SIZE 16
|
||||
/* Generic function for decoding debug feature info */
|
||||
enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
|
||||
enum qed_dbg_features feature_idx)
|
||||
static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
|
||||
enum qed_dbg_features feature_idx)
|
||||
{
|
||||
struct qed_dbg_feature *feature =
|
||||
&p_hwfn->cdev->dbg_params.features[feature_idx];
|
||||
@@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
|
||||
}
|
||||
|
||||
/* Generic function for performing the dump of a debug feature. */
|
||||
enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
||||
enum qed_dbg_features feature_idx)
|
||||
static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
enum qed_dbg_features feature_idx)
|
||||
{
|
||||
struct qed_dbg_feature *feature =
|
||||
&p_hwfn->cdev->dbg_params.features[feature_idx];
|
||||
|
@@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
							       PROTOCOLID_ROCE,
							       0) * 2;
							       NULL) * 2;
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			num_cons =
			    qed_cxt_get_proto_cid_count(p_hwfn,
							PROTOCOLID_ISCSI, 0);
							PROTOCOLID_ISCSI,
							NULL);
			n_eqes += 2 * num_cons;
		}

@@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	int num_features = 1;

#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
	/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the
	 * status blocks equally between L2 / RoCE but with consideration as
	 * to how many l2 queues / cnqs we have
	 */
	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
	if (IS_ENABLED(CONFIG_QED_RDMA) &&
	    p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
		/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
		 * the status blocks equally between L2 / RoCE but with
		 * consideration as to how many l2 queues / cnqs we have.
		 */
		num_features++;

		feat_num[QED_RDMA_CNQ] =
		    min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
			  RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
	}
#endif

	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
					num_features,
					RESC_NUM(p_hwfn, QED_L2_QUEUE));
@@ -38,6 +38,7 @@
|
||||
#include "qed_mcp.h"
|
||||
#include "qed_reg_addr.h"
|
||||
#include "qed_sp.h"
|
||||
#include "qed_roce.h"
|
||||
|
||||
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
|
||||
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
|
||||
@@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
|
||||
qed_ll2_dealloc_buffer(cdev, buffer);
|
||||
}
|
||||
|
||||
void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
|
||||
u8 connection_handle,
|
||||
struct qed_ll2_rx_packet *p_pkt,
|
||||
struct core_rx_fast_path_cqe *p_cqe,
|
||||
bool b_last_packet)
|
||||
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
|
||||
u8 connection_handle,
|
||||
struct qed_ll2_rx_packet *p_pkt,
|
||||
struct core_rx_fast_path_cqe *p_cqe,
|
||||
bool b_last_packet)
|
||||
{
|
||||
u16 packet_length = le16_to_cpu(p_cqe->packet_length);
|
||||
struct qed_ll2_buffer *buffer = p_pkt->cookie;
|
||||
@@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
|
||||
return rc;
|
||||
}
|
||||
|
||||
void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
|
||||
static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
|
||||
{
|
||||
struct qed_ll2_info *p_ll2_conn = NULL;
|
||||
struct qed_ll2_rx_packet *p_pkt = NULL;
|
||||
@@ -537,8 +538,7 @@ void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
|
||||
if (!p_pkt)
|
||||
break;
|
||||
|
||||
list_del(&p_pkt->list_entry);
|
||||
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
|
||||
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
|
||||
|
||||
rx_buf_addr = p_pkt->rx_buf_addr;
|
||||
cookie = p_pkt->cookie;
|
||||
@@ -992,9 +992,8 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
|
||||
p_posting_packet = list_first_entry(&p_rx->posting_descq,
|
||||
struct qed_ll2_rx_packet,
|
||||
list_entry);
|
||||
list_del(&p_posting_packet->list_entry);
|
||||
list_add_tail(&p_posting_packet->list_entry,
|
||||
&p_rx->active_descq);
|
||||
list_move_tail(&p_posting_packet->list_entry,
|
||||
&p_rx->active_descq);
|
||||
b_notify_fw = true;
|
||||
}
|
||||
|
||||
@@ -1123,9 +1122,6 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
|
||||
DMA_REGPAIR_LE(start_bd->addr, first_frag);
|
||||
start_bd->nbytes = cpu_to_le16(first_frag_len);
|
||||
|
||||
SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
|
||||
type);
|
||||
|
||||
DP_VERBOSE(p_hwfn,
|
||||
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
|
||||
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
|
||||
@@ -1188,8 +1184,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
|
||||
if (!p_pkt)
|
||||
break;
|
||||
|
||||
list_del(&p_pkt->list_entry);
|
||||
list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
|
||||
list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
|
||||
}
|
||||
|
||||
SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
|
||||
|
@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
 */
void qed_ll2_free(struct qed_hwfn *p_hwfn,
		  struct qed_ll2_info *p_ll2_connections);
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t rx_buf_addr,
				     u16 data_length,
				     u8 data_length_error,
				     u16 parse_flags,
				     u16 vlan,
				     u32 src_mac_addr_hi,
				     u16 src_mac_addr_lo, bool b_last_packet);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
				     u8 connection_handle,
				     void *cookie,
				     dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet);
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
				    u8 connection_handle,
				    void *cookie,
				    dma_addr_t first_frag_addr,
				    bool b_last_fragment, bool b_last_packet);
#endif
@@ -33,10 +33,8 @@
#include "qed_hw.h"
#include "qed_selftest.h"

#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
#endif

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
	int num_l2_queues;
#endif
	int num_l2_queues = 0;
	int rc;
	int i;

@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
	num_l2_queues = 0;
	if (!IS_ENABLED(CONFIG_QED_RDMA))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);
#endif

	return 0;
}
@@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
	int i;

#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
	params->rdma_pf_params.num_qps = QED_ROCE_QPS;
	params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
	/* divide by 3 the MRs to avoid MF ILT overflow */
	params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
	params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
#endif
	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}

	if (!IS_ENABLED(CONFIG_QED_RDMA))
		return;

	params->rdma_pf_params.num_qps = QED_ROCE_QPS;
	params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
	/* divide by 3 the MRs to avoid MF ILT overflow */
	params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
	params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}

static int qed_slowpath_start(struct qed_dev *cdev,
@@ -880,6 +878,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;
@@ -1432,7 +1431,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
	return status;
}

struct qed_selftest_ops qed_selftest_ops_pass = {
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
@@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
|
||||
}
|
||||
}
|
||||
|
||||
u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
|
||||
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
|
||||
{
|
||||
/* First sb id for RoCE is after all the l2 sb */
|
||||
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
|
||||
}
|
||||
|
||||
u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
|
||||
{
|
||||
return QED_CAU_DEF_RX_TIMER_RES;
|
||||
}
|
||||
|
||||
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
struct qed_rdma_start_in_params *params)
|
||||
@@ -162,7 +157,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
|
||||
p_hwfn->p_rdma_info = p_rdma_info;
|
||||
p_rdma_info->proto = PROTOCOLID_ROCE;
|
||||
|
||||
num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
|
||||
num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
|
||||
NULL);
|
||||
|
||||
p_rdma_info->num_qps = num_cons / 2;
|
||||
|
||||
@@ -275,7 +271,7 @@ free_rdma_info:
|
||||
return rc;
|
||||
}
|
||||
|
||||
void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
|
||||
static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
|
||||
{
|
||||
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
|
||||
|
||||
@@ -527,6 +523,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
|
||||
return qed_spq_post(p_hwfn, p_ent, NULL);
|
||||
}
|
||||
|
||||
static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
int rc;
|
||||
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
|
||||
|
||||
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
|
||||
rc = qed_rdma_bmap_alloc_id(p_hwfn,
|
||||
&p_hwfn->p_rdma_info->tid_map, itid);
|
||||
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
|
||||
out:
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
|
||||
{
|
||||
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
|
||||
@@ -573,7 +589,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
|
||||
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
|
||||
}
|
||||
|
||||
int qed_rdma_stop(void *rdma_cxt)
|
||||
static int qed_rdma_stop(void *rdma_cxt)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
struct rdma_close_func_ramrod_data *p_ramrod;
|
||||
@@ -629,8 +645,8 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
int qed_rdma_add_user(void *rdma_cxt,
|
||||
struct qed_rdma_add_user_out_params *out_params)
|
||||
static int qed_rdma_add_user(void *rdma_cxt,
|
||||
struct qed_rdma_add_user_out_params *out_params)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
u32 dpi_start_offset;
|
||||
@@ -664,7 +680,7 @@ int qed_rdma_add_user(void *rdma_cxt,
|
||||
return rc;
|
||||
}
|
||||
|
||||
struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
|
||||
static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
|
||||
@@ -680,7 +696,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
|
||||
return p_port;
|
||||
}
|
||||
|
||||
struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
|
||||
static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
|
||||
@@ -690,7 +706,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
|
||||
return p_hwfn->p_rdma_info->dev;
|
||||
}
|
||||
|
||||
void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
|
||||
static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
|
||||
@@ -701,27 +717,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
|
||||
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
|
||||
}
|
||||
|
||||
int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
int rc;
|
||||
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
|
||||
|
||||
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
|
||||
rc = qed_rdma_bmap_alloc_id(p_hwfn,
|
||||
&p_hwfn->p_rdma_info->tid_map, itid);
|
||||
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
|
||||
out:
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
|
||||
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn;
|
||||
u16 qz_num;
|
||||
@@ -816,7 +812,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
|
||||
static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
u32 returned_id;
|
||||
@@ -836,7 +832,7 @@ int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
|
||||
return rc;
|
||||
}
|
||||
|
||||
void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
|
||||
static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
|
||||
@@ -873,8 +869,9 @@ qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
|
||||
return toggle_bit;
|
||||
}
|
||||
|
||||
int qed_rdma_create_cq(void *rdma_cxt,
|
||||
struct qed_rdma_create_cq_in_params *params, u16 *icid)
|
||||
static int qed_rdma_create_cq(void *rdma_cxt,
|
||||
struct qed_rdma_create_cq_in_params *params,
|
||||
u16 *icid)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
|
||||
@@ -957,98 +954,10 @@ err:
|
||||
return rc;
|
||||
}
|
||||
|
||||
int qed_rdma_resize_cq(void *rdma_cxt,
|
||||
struct qed_rdma_resize_cq_in_params *in_params,
|
||||
struct qed_rdma_resize_cq_out_params *out_params)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
struct rdma_resize_cq_output_params *p_ramrod_res;
|
||||
struct rdma_resize_cq_ramrod_data *p_ramrod;
|
||||
enum qed_rdma_toggle_bit toggle_bit;
|
||||
struct qed_sp_init_data init_data;
|
||||
struct qed_spq_entry *p_ent;
|
||||
dma_addr_t ramrod_res_phys;
|
||||
u8 fw_return_code;
|
||||
int rc = -ENOMEM;
|
||||
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
|
||||
|
||||
p_ramrod_res =
|
||||
(struct rdma_resize_cq_output_params *)
|
||||
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
|
||||
sizeof(struct rdma_resize_cq_output_params),
|
||||
&ramrod_res_phys, GFP_KERNEL);
|
||||
if (!p_ramrod_res) {
|
||||
DP_NOTICE(p_hwfn,
|
||||
"qed resize cq failed: cannot allocate memory (ramrod)\n");
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Get SPQ entry */
|
||||
memset(&init_data, 0, sizeof(init_data));
|
||||
init_data.cid = in_params->icid;
|
||||
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
|
||||
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
|
||||
|
||||
rc = qed_sp_init_request(p_hwfn, &p_ent,
|
||||
RDMA_RAMROD_RESIZE_CQ,
|
||||
p_hwfn->p_rdma_info->proto, &init_data);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
p_ramrod = &p_ent->ramrod.rdma_resize_cq;
|
||||
|
||||
p_ramrod->flags = 0;
|
||||
|
||||
/* toggle the bit for every resize or create cq for a given icid */
|
||||
toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
|
||||
in_params->icid);
|
||||
|
||||
SET_FIELD(p_ramrod->flags,
|
||||
RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
|
||||
|
||||
SET_FIELD(p_ramrod->flags,
|
||||
RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
|
||||
in_params->pbl_two_level);
|
||||
|
||||
p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
|
||||
p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
|
||||
p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
|
||||
DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
|
||||
DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
|
||||
|
||||
rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
if (fw_return_code != RDMA_RETURN_OK) {
|
||||
DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
|
||||
rc = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
|
||||
out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
|
||||
|
||||
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
|
||||
sizeof(struct rdma_resize_cq_output_params),
|
||||
p_ramrod_res, ramrod_res_phys);
|
||||
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
|
||||
|
||||
return rc;
|
||||
|
||||
err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
|
||||
sizeof(struct rdma_resize_cq_output_params),
|
||||
p_ramrod_res, ramrod_res_phys);
|
||||
DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int qed_rdma_destroy_cq(void *rdma_cxt,
|
||||
struct qed_rdma_destroy_cq_in_params *in_params,
|
||||
struct qed_rdma_destroy_cq_out_params *out_params)
|
||||
static int
|
||||
qed_rdma_destroy_cq(void *rdma_cxt,
|
||||
struct qed_rdma_destroy_cq_in_params *in_params,
|
||||
struct qed_rdma_destroy_cq_out_params *out_params)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
struct rdma_destroy_cq_output_params *p_ramrod_res;
|
||||
@@ -1169,7 +1078,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
|
||||
return flavor;
|
||||
}
|
||||
|
||||
int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
|
||||
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
|
||||
{
|
||||
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
|
||||
u32 responder_icid;
|
||||
@@ -1793,9 +1702,9 @@ err:
|
||||
return rc;
|
||||
}
|
||||
|
||||
int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
|
||||
struct qed_rdma_qp *qp,
|
||||
struct qed_rdma_query_qp_out_params *out_params)
|
||||
static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
|
||||
struct qed_rdma_qp *qp,
|
||||
struct qed_rdma_query_qp_out_params *out_params)
|
||||
{
|
||||
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
|
||||
struct roce_query_qp_req_output_params *p_req_ramrod_res;
|
||||
@@ -1936,7 +1845,7 @@ err_resp:
|
||||
return rc;
|
||||
}
|
||||
|
||||
int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
|
||||
static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
|
||||
{
|
||||
u32 num_invalidated_mw = 0;
|
||||
u32 num_bound_mw = 0;
|
||||
@@ -1985,9 +1894,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int qed_rdma_query_qp(void *rdma_cxt,
|
||||
struct qed_rdma_qp *qp,
|
||||
struct qed_rdma_query_qp_out_params *out_params)
|
||||
static int qed_rdma_query_qp(void *rdma_cxt,
|
||||
struct qed_rdma_qp *qp,
|
||||
struct qed_rdma_query_qp_out_params *out_params)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
int rc;
|
||||
@@ -2022,7 +1931,7 @@ int qed_rdma_query_qp(void *rdma_cxt,
|
||||
return rc;
|
||||
}
|
||||
|
||||
int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
|
||||
static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
int rc = 0;
|
||||
@@ -2038,7 +1947,7 @@ int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
|
||||
return rc;
|
||||
}
|
||||
|
||||
struct qed_rdma_qp *
|
||||
static struct qed_rdma_qp *
|
||||
qed_rdma_create_qp(void *rdma_cxt,
|
||||
struct qed_rdma_create_qp_in_params *in_params,
|
||||
struct qed_rdma_create_qp_out_params *out_params)
|
||||
@@ -2215,9 +2124,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
|
||||
return rc;
|
||||
}
|
||||
|
||||
int qed_rdma_modify_qp(void *rdma_cxt,
|
||||
struct qed_rdma_qp *qp,
|
||||
struct qed_rdma_modify_qp_in_params *params)
|
||||
static int qed_rdma_modify_qp(void *rdma_cxt,
|
||||
struct qed_rdma_qp *qp,
|
||||
struct qed_rdma_modify_qp_in_params *params)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
enum qed_roce_qp_state prev_state;
|
||||
@@ -2312,8 +2221,9 @@ int qed_rdma_modify_qp(void *rdma_cxt,
|
||||
return rc;
|
||||
}
|
||||
|
||||
int qed_rdma_register_tid(void *rdma_cxt,
|
||||
struct qed_rdma_register_tid_in_params *params)
|
||||
static int
|
||||
qed_rdma_register_tid(void *rdma_cxt,
|
||||
struct qed_rdma_register_tid_in_params *params)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
struct rdma_register_tid_ramrod_data *p_ramrod;
|
||||
@@ -2450,7 +2360,7 @@ int qed_rdma_register_tid(void *rdma_cxt,
|
||||
return rc;
|
||||
}
|
||||
|
||||
int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
|
||||
static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
struct rdma_deregister_tid_ramrod_data *p_ramrod;
|
||||
@@ -2561,7 +2471,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
|
||||
qed_rdma_dpm_conf(p_hwfn, p_ptt);
|
||||
}
|
||||
|
||||
int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
|
||||
static int qed_rdma_start(void *rdma_cxt,
|
||||
struct qed_rdma_start_in_params *params)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
struct qed_ptt *p_ptt;
|
||||
@@ -2601,7 +2512,7 @@ static int qed_rdma_init(struct qed_dev *cdev,
|
||||
return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
|
||||
}
|
||||
|
||||
void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
|
||||
static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
|
||||
|
||||
@@ -2808,11 +2719,6 @@ static int qed_roce_ll2_stop(struct qed_dev *cdev)
|
||||
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
|
||||
int rc;
|
||||
|
||||
if (!cdev) {
|
||||
DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
|
||||
DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
|
||||
return -EINVAL;
|
||||
@@ -2849,7 +2755,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
|
||||
int rc;
|
||||
int i;
|
||||
|
||||
if (!cdev || !pkt || !params) {
|
||||
if (!pkt || !params) {
|
||||
DP_ERR(cdev,
|
||||
"roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
|
||||
cdev, pkt, params);
|
||||
|
@@ -95,26 +95,6 @@ struct qed_rdma_info {
|
||||
enum protocol_type proto;
|
||||
};
|
||||
|
||||
struct qed_rdma_resize_cq_in_params {
|
||||
u16 icid;
|
||||
u32 cq_size;
|
||||
bool pbl_two_level;
|
||||
u64 pbl_ptr;
|
||||
u16 pbl_num_pages;
|
||||
u8 pbl_page_size_log;
|
||||
};
|
||||
|
||||
struct qed_rdma_resize_cq_out_params {
|
||||
u32 prod;
|
||||
u32 cons;
|
||||
};
|
||||
|
||||
struct qed_rdma_resize_cnq_in_params {
|
||||
u32 cnq_id;
|
||||
u32 pbl_page_size_log;
|
||||
u64 pbl_ptr;
|
||||
};
|
||||
|
||||
struct qed_rdma_qp {
|
||||
struct regpair qp_handle;
|
||||
struct regpair qp_handle_async;
|
||||
@@ -181,36 +161,55 @@ struct qed_rdma_qp {
|
||||
dma_addr_t shared_queue_phys_addr;
|
||||
};
|
||||
|
||||
int
|
||||
qed_rdma_add_user(void *rdma_cxt,
|
||||
struct qed_rdma_add_user_out_params *out_params);
|
||||
int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
|
||||
int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
|
||||
int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
|
||||
void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
|
||||
struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
|
||||
struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
|
||||
int
|
||||
qed_rdma_register_tid(void *rdma_cxt,
|
||||
struct qed_rdma_register_tid_in_params *params);
|
||||
void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
|
||||
int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
|
||||
int qed_rdma_stop(void *rdma_cxt);
|
||||
u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
|
||||
u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
|
||||
void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
|
||||
void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
|
||||
#if IS_ENABLED(CONFIG_QED_RDMA)
|
||||
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
|
||||
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
|
||||
struct event_ring_entry *p_eqe);
|
||||
int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
|
||||
int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
|
||||
struct qed_rdma_modify_qp_in_params *params);
|
||||
int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
|
||||
struct qed_rdma_query_qp_out_params *out_params);
|
||||
|
||||
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
|
||||
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
|
||||
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
|
||||
u8 connection_handle,
|
||||
void *cookie,
|
||||
dma_addr_t first_frag_addr,
|
||||
bool b_last_fragment, bool b_last_packet);
|
||||
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
|
||||
u8 connection_handle,
|
||||
void *cookie,
|
||||
dma_addr_t first_frag_addr,
|
||||
bool b_last_fragment, bool b_last_packet);
|
||||
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
|
||||
u8 connection_handle,
|
||||
void *cookie,
|
||||
dma_addr_t rx_buf_addr,
|
||||
u16 data_length,
|
||||
u8 data_length_error,
|
||||
u16 parse_flags,
|
||||
u16 vlan,
|
||||
u32 src_mac_addr_hi,
|
||||
u16 src_mac_addr_lo, bool b_last_packet);
|
||||
#else
|
||||
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
|
||||
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
|
||||
static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
|
||||
static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
|
||||
u8 connection_handle,
|
||||
void *cookie,
|
||||
dma_addr_t first_frag_addr,
|
||||
bool b_last_fragment,
|
||||
bool b_last_packet) {}
|
||||
static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
|
||||
u8 connection_handle,
|
||||
void *cookie,
|
||||
dma_addr_t first_frag_addr,
|
||||
bool b_last_fragment,
|
||||
bool b_last_packet) {}
|
||||
static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
|
||||
u8 connection_handle,
|
||||
void *cookie,
|
||||
dma_addr_t rx_buf_addr,
|
||||
u16 data_length,
|
||||
u8 data_length_error,
|
||||
u16 parse_flags,
|
||||
u16 vlan,
|
||||
u32 src_mac_addr_hi,
|
||||
u16 src_mac_addr_lo,
|
||||
bool b_last_packet) {}
|
||||
#endif
|
||||
#endif
|
||||
|
@@ -80,7 +80,6 @@ union ramrod_data {
	struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
	struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
	struct rdma_create_cq_ramrod_data rdma_create_cq;
	struct rdma_resize_cq_ramrod_data rdma_resize_cq;
	struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
	struct rdma_srq_create_ramrod_data rdma_create_srq;
	struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
@@ -28,9 +28,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h"
#endif

/***************************************************************************
* Structures & Definitions
@@ -272,11 +270,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
	case PROTOCOLID_ROCE:
		qed_async_roce_event(p_hwfn, p_eqe);
		return 0;
#endif
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o

qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
qede-$(CONFIG_QED_RDMA) += qede_roce.o
@@ -349,12 +349,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
			     u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);

#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)

#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
@@ -748,6 +748,8 @@ static void qede_get_channels(struct net_device *dev,
|
||||
struct qede_dev *edev = netdev_priv(dev);
|
||||
|
||||
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
|
||||
channels->max_rx = QEDE_MAX_RSS_CNT(edev);
|
||||
channels->max_tx = QEDE_MAX_RSS_CNT(edev);
|
||||
channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
|
||||
edev->fp_num_rx;
|
||||
channels->tx_count = edev->fp_num_tx;
|
||||
@@ -812,6 +814,13 @@ static int qede_set_channels(struct net_device *dev,
|
||||
edev->req_queues = count;
|
||||
edev->req_num_tx = channels->tx_count;
|
||||
edev->req_num_rx = channels->rx_count;
|
||||
/* Reset the indirection table if rx queue count is updated */
|
||||
if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
|
||||
edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
|
||||
memset(&edev->rss_params.rss_ind_table, 0,
|
||||
sizeof(edev->rss_params.rss_ind_table));
|
||||
}
|
||||
|
||||
if (netif_running(dev))
|
||||
qede_reload(edev, NULL, NULL);
|
||||
|
||||
@@ -1045,6 +1054,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
|
||||
struct qede_dev *edev = netdev_priv(dev);
|
||||
int i;
|
||||
|
||||
if (edev->dev_info.common.num_hwfns > 1) {
|
||||
DP_INFO(edev,
|
||||
"RSS configuration is not supported for 100G devices\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
@@ -1176,8 +1191,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
|
||||
}
|
||||
|
||||
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
|
||||
dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
|
||||
BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
|
||||
dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
|
||||
BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
|
||||
txq->sw_tx_cons++;
|
||||
txq->sw_tx_ring[idx].skb = NULL;
|
||||
|
||||
@@ -1191,8 +1206,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
|
||||
struct qede_rx_queue *rxq = NULL;
|
||||
struct sw_rx_data *sw_rx_data;
|
||||
union eth_rx_cqe *cqe;
|
||||
int i, rc = 0;
|
||||
u8 *data_ptr;
|
||||
int i;
|
||||
|
||||
for_each_queue(i) {
|
||||
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
|
||||
@@ -1211,46 +1226,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
|
||||
* queue and that the loopback traffic is not IP.
|
||||
*/
|
||||
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
|
||||
if (qede_has_rx_work(rxq))
|
||||
if (!qede_has_rx_work(rxq)) {
|
||||
usleep_range(100, 200);
|
||||
continue;
|
||||
}
|
||||
|
||||
hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
|
||||
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
|
||||
|
||||
/* Memory barrier to prevent the CPU from doing speculative
|
||||
* reads of CQE/BD before reading hw_comp_cons. If the CQE is
|
||||
* read before it is written by FW, then FW writes CQE and SB,
|
||||
* and then the CPU reads the hw_comp_cons, it will use an old
|
||||
* CQE.
|
||||
*/
|
||||
rmb();
|
||||
|
||||
/* Get the CQE from the completion ring */
|
||||
cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
|
||||
|
||||
/* Get the data from the SW ring */
|
||||
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
|
||||
sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
|
||||
fp_cqe = &cqe->fast_path_regular;
|
||||
len = le16_to_cpu(fp_cqe->len_on_first_bd);
|
||||
data_ptr = (u8 *)(page_address(sw_rx_data->data) +
|
||||
fp_cqe->placement_offset +
|
||||
sw_rx_data->page_offset);
|
||||
if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
|
||||
ether_addr_equal(data_ptr + ETH_ALEN,
|
||||
edev->ndev->dev_addr)) {
|
||||
for (i = ETH_HLEN; i < len; i++)
|
||||
if (data_ptr[i] != (unsigned char)(i & 0xff)) {
|
||||
rc = -1;
|
||||
break;
|
||||
}
|
||||
|
||||
qede_recycle_rx_bd_ring(rxq, edev, 1);
|
||||
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
|
||||
break;
|
||||
usleep_range(100, 200);
|
||||
}
|
||||
|
||||
DP_INFO(edev, "Not the transmitted packet\n");
|
||||
qede_recycle_rx_bd_ring(rxq, edev, 1);
|
||||
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
|
||||
}
|
||||
|
||||
if (!qede_has_rx_work(rxq)) {
|
||||
if (i == QEDE_SELFTEST_POLL_COUNT) {
|
||||
DP_NOTICE(edev, "Failed to receive the traffic\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
|
||||
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
|
||||
qede_update_rx_prod(edev, rxq);
|
||||
|
||||
/* Memory barrier to prevent the CPU from doing speculative reads of CQE
|
||||
* / BD before reading hw_comp_cons. If the CQE is read before it is
|
||||
* written by FW, then FW writes CQE and SB, and then the CPU reads the
|
||||
* hw_comp_cons, it will use an old CQE.
|
||||
*/
|
||||
rmb();
|
||||
|
||||
/* Get the CQE from the completion ring */
|
||||
cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
|
||||
|
||||
/* Get the data from the SW ring */
|
||||
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
|
||||
sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
|
||||
fp_cqe = &cqe->fast_path_regular;
|
||||
len = le16_to_cpu(fp_cqe->len_on_first_bd);
|
||||
data_ptr = (u8 *)(page_address(sw_rx_data->data) +
|
||||
fp_cqe->placement_offset + sw_rx_data->page_offset);
|
||||
for (i = ETH_HLEN; i < len; i++)
|
||||
if (data_ptr[i] != (unsigned char)(i & 0xff)) {
|
||||
DP_NOTICE(edev, "Loopback test failed\n");
|
||||
qede_recycle_rx_bd_ring(rxq, edev, 1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
qede_recycle_rx_bd_ring(rxq, edev, 1);
|
||||
|
||||
return 0;
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
|
||||
|
@@ -317,8 +317,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
@@ -363,8 +363,8 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
		nbd--;
	}

	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
@@ -964,8 +964,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
	return 0;
}

static inline void qede_update_rx_prod(struct qede_dev *edev,
				       struct qede_rx_queue *rxq)
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -3006,7 +3005,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
	size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
	txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
	if (!txq->sw_tx_ring) {
		DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
@@ -3017,7 +3016,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    NUM_TX_BDS_MAX,
					    TX_RING_SIZE,
					    sizeof(*p_virt), &txq->tx_pbl);
	if (rc)
		goto err;