Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "A more active cycle than most of the recent past, with a few large,
  long discussed works this time.

  The RNBD block driver has been posted for nearly two years now, and
  is flowing through RDMA due to it also introducing a new ULP. The
  removal of FMR has been a recurring discussion theme for a long time.
  And the usual smattering of features and bug fixes.

  Summary:

   - Various small driver bug fixes in rxe, mlx5, hfi1, and efa

   - Continuing driver cleanups in bnxt_re, hns

   - Big cleanup of mlx5 QP creation flows

   - More consistent use of src port and flow label when LAG is used,
     and an mlx5 implementation

   - Additional set of cleanups for IB CM

   - 'RNBD' network block driver and target. This is a network block
     RDMA device specific to ionos's cloud environment. It brings
     strong multipath and resiliency capabilities.

   - Accelerated IPoIB for HFI1

   - QP/WQ/SRQ ioctl migration for uverbs, and support for multiple
     async fds

   - Support for exchanging the new IBTA defined ECE data during RDMA
     CM exchanges

   - Removal of the very old and insecure FMR interface from all ULPs
     and drivers. FRWR should be preferred for at least a decade now"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (247 commits)
  RDMA/cm: Spurious WARNING triggered in cm_destroy_id()
  RDMA/mlx5: Return ECE DC support
  RDMA/mlx5: Don't rely on FW to set zeros in ECE response
  RDMA/mlx5: Return an error if copy_to_user fails
  IB/hfi1: Use free_netdev() in hfi1_netdev_free()
  RDMA/hns: Uninitialized variable in modify_qp_init_to_rtr()
  RDMA/core: Move and rename trace_cm_id_create()
  IB/hfi1: Fix hfi1_netdev_rx_init() error handling
  RDMA: Remove 'max_map_per_fmr'
  RDMA: Remove 'max_fmr'
  RDMA/core: Remove FMR device ops
  RDMA/rdmavt: Remove FMR memory registration
  RDMA/mthca: Remove FMR support for memory registration
  RDMA/mlx4: Remove FMR support for memory registration
  RDMA/i40iw: Remove FMR leftovers
  RDMA/bnxt_re: Remove FMR leftovers
  RDMA/mlx5: Remove FMR leftovers
  RDMA/core: Remove FMR pool API
  RDMA/rds: Remove FMR support for memory registration
  RDMA/srp: Remove support for FMR memory registration
  ...
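For context on the "src port and flow label" item above: RoCE v2 runs over UDP, and LAG steering hashes on the UDP source port, so when the GRH carries a non-zero flow label the new mlx5 code derives the source port from it (see mlx5_ah_get_udp_sport() in the ah.c hunk below) rather than from the GID index. A minimal standalone sketch of that mapping, modeled on the rdma_flow_label_to_udp_sport() helper this series uses; the 0xC000 base and the xor-fold are illustrative assumptions, not a verbatim copy of the kernel code:

	/*
	 * Illustrative sketch only: fold a 20-bit GRH/IPv6 flow label into
	 * the 14 low bits of the RoCE v2 UDP source-port range so that LAG
	 * hashing sees per-flow entropy.
	 */
	#include <stdint.h>

	#define ROCE_V2_UDP_SPORT_BASE 0xC000u	/* assumed valid-port floor */

	static uint16_t flow_label_to_udp_sport(uint32_t fl)
	{
		uint32_t fl_low = fl & 0x03fffu;	/* low 14 bits fit in the port */
		uint32_t fl_high = fl & 0xfc000u;	/* high 6 bits would be lost */

		fl_low ^= fl_high >> 14;		/* so fold them back in */

		return (uint16_t)(fl_low | ROCE_V2_UDP_SPORT_BASE);
	}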
@@ -16,7 +16,8 @@ mlx5_ib-y := ah.o \
 	qpc.o \
 	restrack.o \
 	srq.o \
-	srq_cmd.o
+	srq_cmd.o \
+	wr.o
 
 mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
 mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
@@ -32,9 +32,28 @@
 #include "mlx5_ib.h"
 
-static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
-			 struct rdma_ah_attr *ah_attr)
+static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
+				    const struct rdma_ah_attr *ah_attr)
 {
 	enum ib_gid_type gid_type = ah_attr->grh.sgid_attr->gid_type;
+	__be16 sport;
+
+	if ((gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+	    (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
+	    (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK))
+		sport = cpu_to_be16(
+			rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
+	else
+		sport = mlx5_get_roce_udp_sport_min(dev,
+						    ah_attr->grh.sgid_attr);
+
+	return sport;
+}
+
+static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+			 struct rdma_ah_init_attr *init_attr)
+{
+	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+	enum ib_gid_type gid_type;
 
 	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
@@ -51,12 +70,15 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
 	ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
 
 	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+		if (init_attr->xmit_slave)
+			ah->xmit_port =
+				mlx5_lag_get_slave_port(dev->mdev,
+							init_attr->xmit_slave);
+		gid_type = ah_attr->grh.sgid_attr->gid_type;
+
 		memcpy(ah->av.rmac, ah_attr->roce.dmac,
 		       sizeof(ah_attr->roce.dmac));
-		ah->av.udp_sport =
-			mlx5_get_roce_udp_sport(dev, ah_attr->grh.sgid_attr);
+		ah->av.udp_sport = mlx5_ah_get_udp_sport(dev, ah_attr);
 		ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
 		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
 #define MLX5_ECN_ENABLED BIT(1)
@@ -68,10 +90,11 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
 	}
 }
 
-int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
-		      u32 flags, struct ib_udata *udata)
+int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+		      struct ib_udata *udata)
 {
+	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
 	struct mlx5_ib_ah *ah = to_mah(ibah);
 	struct mlx5_ib_dev *dev = to_mdev(ibah->device);
 	enum rdma_ah_attr_type ah_type = ah_attr->type;
@@ -97,7 +120,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
 		return err;
 	}
 
-	create_ib_ah(dev, ah, ah_attr);
+	create_ib_ah(dev, ah, init_attr);
 	return 0;
 }
@@ -1,46 +1,19 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
- * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
  */
 
 #include "cmd.h"
 
 int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
 {
-	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
 	int err;
 
 	MLX5_SET(query_special_contexts_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
 	if (!err)
 		*mkey = MLX5_GET(query_special_contexts_out, out,
 				 dump_fill_mkey);
@@ -50,12 +23,12 @@ int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
 int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
 {
 	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
-	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
 	int err;
 
 	MLX5_SET(query_special_contexts_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
 	if (!err)
 		*null_mkey = MLX5_GET(query_special_contexts_out, out,
 				      null_mkey);
@@ -63,23 +36,15 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
 }
 
 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
-			       void *out, int out_size)
+			       void *out)
 {
-	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };
+	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};
 
 	MLX5_SET(query_cong_params_in, in, opcode,
 		 MLX5_CMD_OP_QUERY_CONG_PARAMS);
 	MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);
 
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
-int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
-				void *in, int in_size)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };
-
-	return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
+	return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
 }
@@ -133,7 +98,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
 		MLX5_SET64(alloc_memic_in, in, range_start_addr,
 			   hw_start_addr + (page_idx * PAGE_SIZE));
 
-		ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+		ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
 		if (ret) {
 			spin_lock(&dm->lock);
 			bitmap_clear(dm->memic_alloc_pages,
@@ -162,8 +127,7 @@ void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
 	struct mlx5_core_dev *dev = dm->dev;
 	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
 	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
-	u32 out[MLX5_ST_SZ_DW(dealloc_memic_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
 	u64 start_page_idx;
 	int err;
 
@@ -174,7 +138,7 @@ void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
 	MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
 	MLX5_SET(dealloc_memic_in, in, memic_size, length);
 
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
 	if (err)
 		return;
 
@@ -198,49 +162,46 @@ int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
 
 void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
-	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
 
 	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
 	MLX5_SET(destroy_tir_in, in, tirn, tirn);
 	MLX5_SET(destroy_tir_in, in, uid, uid);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec_in(dev, destroy_tir, in);
 }
 
 void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
 
 	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
 	MLX5_SET(destroy_tis_in, in, tisn, tisn);
 	MLX5_SET(destroy_tis_in, in, uid, uid);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec_in(dev, destroy_tis, in);
 }
 
 void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
 {
-	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
-	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
 
 	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
 	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
 	MLX5_SET(destroy_rqt_in, in, uid, uid);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec_in(dev, destroy_rqt, in);
 }
 
 int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
 				    u16 uid)
 {
-	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
-	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
+	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
 	int err;
 
 	MLX5_SET(alloc_transport_domain_in, in, opcode,
 		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
 	MLX5_SET(alloc_transport_domain_in, in, uid, uid);
 
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
 	if (!err)
 		*tdn = MLX5_GET(alloc_transport_domain_out, out,
 				transport_domain);
@@ -251,32 +212,29 @@ int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
 void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
 				       u16 uid)
 {
-	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
-	u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
 
 	MLX5_SET(dealloc_transport_domain_in, in, opcode,
 		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
 	MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
 	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
 }
 
 void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
 {
-	u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {};
-	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
 
 	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
 	MLX5_SET(dealloc_pd_in, in, pd, pdn);
 	MLX5_SET(dealloc_pd_in, in, uid, uid);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec_in(dev, dealloc_pd, in);
 }
 
 int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
 			u32 qpn, u16 uid)
 {
-	u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {};
-	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
+	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
 	void *gid;
 
 	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
@@ -284,14 +242,13 @@ int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
 	MLX5_SET(attach_to_mcg_in, in, uid, uid);
 	gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
 	memcpy(gid, mgid, sizeof(*mgid));
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
 }
 
 int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
 			u32 qpn, u16 uid)
 {
-	u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {};
-	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
+	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
 	void *gid;
 
 	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
@@ -299,18 +256,18 @@ int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
 	MLX5_SET(detach_from_mcg_in, in, uid, uid);
 	gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
 	memcpy(gid, mgid, sizeof(*mgid));
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
 }
 
 int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
 {
 	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
-	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
 	int err;
 
 	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
 	MLX5_SET(alloc_xrcd_in, in, uid, uid);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
 	if (!err)
 		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
 	return err;
@@ -318,13 +275,12 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
 
 int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
 {
-	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {};
-	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
 
 	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
 	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
 	MLX5_SET(dealloc_xrcd_in, in, uid, uid);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
 }
 
 int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
@@ -350,7 +306,7 @@ int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
 	data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
 	memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
 
-	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+	err = mlx5_cmd_exec_inout(dev, mad_ifc, in, out);
 	if (err)
 		goto out;
 
@@ -40,10 +40,8 @@
 int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
 int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
-			       void *out, int out_size);
+			       void *out);
 int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out);
-int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
-				void *in, int in_size);
 int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
 			 u64 length, u32 alignment);
 void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
@@ -290,7 +290,7 @@ static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
 
 	node = mlx5_ib_param_to_node(offset);
 
-	err = mlx5_cmd_query_cong_params(mdev, node, out, outlen);
+	err = mlx5_cmd_query_cong_params(mdev, node, out);
 	if (err)
 		goto free;
 
@@ -339,7 +339,7 @@ static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
 	MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
 		 attr_mask);
 
-	err = mlx5_cmd_modify_cong_params(mdev, in, inlen);
+	err = mlx5_cmd_exec_in(dev->mdev, modify_cong_params, in);
 	kvfree(in);
 alloc_err:
 	mlx5_ib_put_native_port_mdev(dev, port_num + 1);
@@ -495,6 +495,10 @@ static u64 devx_get_obj_id(const void *in)
 		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 					MLX5_GET(rst2init_qp_in, in, qpn));
 		break;
+	case MLX5_CMD_OP_INIT2INIT_QP:
+		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+					MLX5_GET(init2init_qp_in, in, qpn));
+		break;
 	case MLX5_CMD_OP_INIT2RTR_QP:
 		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
 					MLX5_GET(init2rtr_qp_in, in, qpn));
@@ -615,7 +619,7 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
 		enum ib_qp_type	qp_type = qp->ibqp.qp_type;
 
 		if (qp_type == IB_QPT_RAW_PACKET ||
-		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
+		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
 			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
 				&qp->raw_packet_qp;
 			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
@@ -820,6 +824,7 @@ static bool devx_is_obj_modify_cmd(const void *in)
 	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
 	case MLX5_CMD_OP_RST2INIT_QP:
 	case MLX5_CMD_OP_INIT2RTR_QP:
+	case MLX5_CMD_OP_INIT2INIT_QP:
 	case MLX5_CMD_OP_RTR2RTS_QP:
 	case MLX5_CMD_OP_RTS2RTS_QP:
 	case MLX5_CMD_OP_SQERR2RTS_QP:
@@ -2217,14 +2222,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
 	obj->mdev = dev->mdev;
 	uobj->object = obj;
 	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
-	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
-	if (err)
-		goto err_umem_destroy;
+	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
 
-	return 0;
+	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
+			     sizeof(obj_id));
+	return err;
 
-err_umem_destroy:
-	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
 err_umem_release:
 	ib_umem_release(obj->umem);
 err_obj_free:
@@ -67,46 +67,41 @@ static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
 	},
 };
 
-#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
-static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
-	struct uverbs_attr_bundle *attrs)
+static int get_dests(struct uverbs_attr_bundle *attrs,
+		     struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id,
+		     int *dest_type, struct ib_qp **qp, u32 *flags)
 {
-	struct mlx5_flow_context flow_context = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
-	struct mlx5_ib_flow_handler *flow_handler;
-	struct mlx5_ib_flow_matcher *fs_matcher;
-	struct ib_uobject **arr_flow_actions;
-	struct ib_uflow_resources *uflow_res;
-	struct mlx5_flow_act flow_act = {};
-	void *devx_obj;
-	int dest_id, dest_type;
-	void *cmd_in;
-	int inlen;
 	bool dest_devx, dest_qp;
-	struct ib_qp *qp = NULL;
-	struct ib_uobject *uobj =
-		uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
-	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
-	int len, ret, i;
-	u32 counter_id = 0;
-	u32 *offset_attr;
-	u32 offset = 0;
+	void *devx_obj;
+	int err;
 
-	if (!capable(CAP_NET_RAW))
-		return -EPERM;
-
-	dest_devx =
-		uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+	dest_devx = uverbs_attr_is_valid(attrs,
+					 MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
 	dest_qp = uverbs_attr_is_valid(attrs,
 				       MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
 
-	fs_matcher = uverbs_attr_get_obj(attrs,
-					 MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
-	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS &&
-	    ((dest_devx && dest_qp) || (!dest_devx && !dest_qp)))
+	*flags = 0;
+	err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+				 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS |
+				 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP);
+	if (err)
+		return err;
+
+	/* Both flags are not allowed */
+	if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS &&
+	    *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
 		return -EINVAL;
 
-	/* Allow only DEVX object as dest when inserting to FDB */
-	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx)
+	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
+		if (dest_devx && (dest_qp || *flags))
 			return -EINVAL;
+		else if (dest_qp && *flags)
+			return -EINVAL;
+	}
+
+	/* Allow only DEVX object, drop as dest for FDB */
+	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx ||
+	     (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
+		return -EINVAL;
 
 	/* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
@@ -114,43 +109,86 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
 	    ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
 		return -EINVAL;
 
+	*qp = NULL;
 	if (dest_devx) {
-		devx_obj = uverbs_attr_get_obj(
-			attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
-		if (IS_ERR(devx_obj))
-			return PTR_ERR(devx_obj);
+		devx_obj =
+			uverbs_attr_get_obj(attrs,
+					    MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
 
 		/* Verify that the given DEVX object is a flow
 		 * steering destination.
 		 */
-		if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
+		if (!mlx5_ib_devx_is_flow_dest(devx_obj, dest_id, dest_type))
 			return -EINVAL;
 		/* Allow only flow table as dest when inserting to FDB or RDMA_RX */
 		if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
 		     fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
-		    dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+		    *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
 			return -EINVAL;
 	} else if (dest_qp) {
 		struct mlx5_ib_qp *mqp;
 
-		qp = uverbs_attr_get_obj(attrs,
-					 MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
-		if (IS_ERR(qp))
-			return PTR_ERR(qp);
+		*qp = uverbs_attr_get_obj(attrs,
+					  MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+		if (IS_ERR(*qp))
+			return PTR_ERR(*qp);
 
-		if (qp->qp_type != IB_QPT_RAW_PACKET)
+		if ((*qp)->qp_type != IB_QPT_RAW_PACKET)
 			return -EINVAL;
 
-		mqp = to_mqp(qp);
-		if (mqp->flags & MLX5_IB_QP_RSS)
-			dest_id = mqp->rss_qp.tirn;
+		mqp = to_mqp(*qp);
+		if (mqp->is_rss)
+			*dest_id = mqp->rss_qp.tirn;
 		else
-			dest_id = mqp->raw_packet_qp.rq.tirn;
-		dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-	} else {
-		dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+			*dest_id = mqp->raw_packet_qp.rq.tirn;
+		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
+		*dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
 	}
 
+	if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+	    fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
+		return -EINVAL;
+
+	return 0;
+}
+
+#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
+static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
+	struct uverbs_attr_bundle *attrs)
+{
+	struct mlx5_flow_context flow_context = {.flow_tag =
+		MLX5_FS_DEFAULT_FLOW_TAG};
+	u32 *offset_attr, offset = 0, counter_id = 0;
+	int dest_id, dest_type, inlen, len, ret, i;
+	struct mlx5_ib_flow_handler *flow_handler;
+	struct mlx5_ib_flow_matcher *fs_matcher;
+	struct ib_uobject **arr_flow_actions;
+	struct ib_uflow_resources *uflow_res;
+	struct mlx5_flow_act flow_act = {};
+	struct ib_qp *qp = NULL;
+	void *devx_obj, *cmd_in;
+	struct ib_uobject *uobj;
+	struct mlx5_ib_dev *dev;
+	u32 flags;
+
+	if (!capable(CAP_NET_RAW))
+		return -EPERM;
+
+	fs_matcher = uverbs_attr_get_obj(attrs,
+					 MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+	uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
+	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+
+	if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags))
+		return -EINVAL;
+
+	if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS)
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+
+	if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+
 	len = uverbs_attr_get_uobjs_arr(attrs,
 		MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
 	if (len) {
@@ -180,10 +218,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
 	}
 
-	if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
-	    fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
-		return -EINVAL;
-
 	cmd_in = uverbs_attr_get_alloced_ptr(
 		attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
 	inlen = uverbs_attr_get_len(attrs,
@@ -629,7 +663,10 @@ DECLARE_UVERBS_NAMED_METHOD(
 	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
 			   UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
 			   UA_OPTIONAL,
-			   UA_ALLOC_AND_COPY));
+			   UA_ALLOC_AND_COPY),
+	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+			     enum mlx5_ib_create_flow_flags,
+			     UA_OPTIONAL));
 
 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
 	MLX5_IB_METHOD_DESTROY_FLOW,
@@ -119,17 +119,15 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
 	struct mlx5_ib_gsi_qp *gsi;
 	struct ib_qp_init_attr hw_init_attr = *init_attr;
 	const u8 port_num = init_attr->port_num;
-	const int num_pkeys = pd->device->attrs.max_pkeys;
-	const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? num_pkeys : 0;
+	int num_qps = 0;
 	int ret;
 
 	mlx5_ib_dbg(dev, "creating GSI QP\n");
 
-	if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) {
-		mlx5_ib_warn(dev,
-			     "invalid port number %d during GSI QP creation\n",
-			     port_num);
-		return ERR_PTR(-EINVAL);
+	if (mlx5_ib_deth_sqpn_cap(dev)) {
+		if (MLX5_CAP_GEN(dev->mdev,
+				 port_type) == MLX5_CAP_PORT_TYPE_IB)
+			num_qps = pd->device->attrs.max_pkeys;
+		else if (dev->lag_active)
+			num_qps = MLX5_MAX_PORTS;
 	}
 
 	gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
@@ -270,7 +268,7 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
 }
 
 static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
-			 u16 qp_index)
+			 u16 pkey_index)
 {
 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
 	struct ib_qp_attr attr;
@@ -279,7 +277,7 @@ static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
 
 	mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
 	attr.qp_state = IB_QPS_INIT;
-	attr.pkey_index = qp_index;
+	attr.pkey_index = pkey_index;
 	attr.qkey = IB_QP1_QKEY;
 	attr.port_num = gsi->port_num;
 	ret = ib_modify_qp(qp, &attr, mask);
@@ -313,12 +311,17 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
 {
 	struct ib_device *device = gsi->rx_qp->device;
 	struct mlx5_ib_dev *dev = to_mdev(device);
+	int pkey_index = qp_index;
+	struct mlx5_ib_qp *mqp;
 	struct ib_qp *qp;
 	unsigned long flags;
 	u16 pkey;
 	int ret;
 
-	ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
+	if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+		pkey_index = 0;
+
+	ret = ib_query_pkey(device, gsi->port_num, pkey_index, &pkey);
 	if (ret) {
 		mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
 			     gsi->port_num, qp_index);
@@ -347,7 +350,10 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
 		return;
 	}
 
-	ret = modify_to_rts(gsi, qp, qp_index);
+	mqp = to_mqp(qp);
+	if (dev->lag_active)
+		mqp->gsi_lag_port = qp_index + 1;
+	ret = modify_to_rts(gsi, qp, pkey_index);
 	if (ret)
 		goto err_destroy_qp;
 
@@ -466,11 +472,15 @@ static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi,
 static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
 {
 	struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+	struct mlx5_ib_ah *ah = to_mah(wr->ah);
 	int qp_index = wr->pkey_index;
 
-	if (!mlx5_ib_deth_sqpn_cap(dev))
+	if (!gsi->num_qps)
 		return gsi->rx_qp;
 
+	if (dev->lag_active && ah->xmit_port)
+		qp_index = ah->xmit_port - 1;
+
 	if (qp_index >= gsi->num_qps)
 		return NULL;
 
@@ -9,9 +9,9 @@
 #include <linux/mlx5/eswitch.h>
 #include "mlx5_ib.h"
 
-#ifdef CONFIG_MLX5_ESWITCH
 extern const struct mlx5_ib_profile raw_eth_profile;
 
+#ifdef CONFIG_MLX5_ESWITCH
 u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
 					  u16 vport_num);
@@ -53,6 +53,7 @@
 #include <linux/list.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_umem.h>
+#include <rdma/lag.h>
 #include <linux/in.h>
 #include <linux/etherdevice.h>
 #include "mlx5_ib.h"
@@ -60,6 +61,7 @@
 #include "cmd.h"
 #include "srq.h"
 #include "qp.h"
+#include "wr.h"
 #include <linux/mlx5/fs_helpers.h>
 #include <linux/mlx5/accel.h>
 #include <rdma/uverbs_std_types.h>
@@ -70,17 +72,10 @@
 #define UVERBS_MODULE_NAME mlx5_ib
 #include <rdma/uverbs_named_ioctl.h>
 
-#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "5.0-0"
-
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
 MODULE_LICENSE("Dual BSD/GPL");
 
-static char mlx5_version[] =
-	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
-	DRIVER_VERSION "\n";
-
 struct mlx5_ib_event_work {
 	struct work_struct work;
 	union {
@@ -628,8 +623,8 @@ static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
 			     attr->index, NULL, NULL);
 }
 
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
-			       const struct ib_gid_attr *attr)
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+				   const struct ib_gid_attr *attr)
 {
 	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
 		return 0;
@@ -1004,7 +999,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
-	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
 	props->max_ah = INT_MAX;
 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
@@ -1964,6 +1958,9 @@ uar_done:
 		resp.response_length += sizeof(resp.dump_fill_mkey);
 	}
 
+	if (MLX5_CAP_GEN(dev->mdev, ece_support))
+		resp.comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
+
 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
 	if (err)
 		goto out_mdev;
@@ -1974,7 +1971,7 @@ uar_done:
 	context->lib_caps = req.lib_caps;
 	print_lib_caps(dev, context->lib_caps);
 
-	if (dev->lag_active) {
+	if (mlx5_ib_lag_should_assign_affinity(dev)) {
 		u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
 
 		atomic_set(&context->tx_port_affinity,
@@ -2561,7 +2558,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	struct mlx5_ib_alloc_pd_resp resp;
 	int err;
 	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
-	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
 	u16 uid = 0;
 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
 		udata, struct mlx5_ib_ucontext, ibucontext);
@@ -2569,8 +2566,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	uid = context ? context->devx_uid : 0;
 	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
 	MLX5_SET(alloc_pd_in, in, uid, uid);
-	err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
-			    out, sizeof(out));
+	err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
 	if (err)
 		return err;
 
@@ -3944,7 +3940,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 		dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
 	} else {
 		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-		if (mqp->flags & MLX5_IB_QP_RSS)
+		if (mqp->is_rss)
 			dst->tir_num = mqp->rss_qp.tirn;
 		else
 			dst->tir_num = mqp->raw_packet_qp.rq.tirn;
@@ -4199,18 +4195,17 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
 
 	if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
 		dst[dst_num].type = dest_type;
-		dst[dst_num].tir_num = dest_id;
+		dst[dst_num++].tir_num = dest_id;
 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	} else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
-		dst[dst_num].ft_num = dest_id;
+		dst[dst_num++].ft_num = dest_id;
 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-	} else {
-		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+	} else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
+		dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
 		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
 	}
 
-	dst_num++;
-
 	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
 		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -4420,7 +4415,7 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	uid = ibqp->pd ?
 		to_mpd(ibqp->pd)->uid : 0;
 
-	if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
+	if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
 		mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
 		return -EOPNOTSUPP;
 	}
@@ -6194,26 +6189,20 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
 	mmap_offset = mlx5_entry_to_mmap_offset(entry);
 	length = entry->rdma_entry.npages * PAGE_SIZE;
 	uobj->object = entry;
+	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
 
 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
 			     &mmap_offset, sizeof(mmap_offset));
 	if (err)
-		goto err;
+		return err;
 
 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
 			     &entry->page_idx, sizeof(entry->page_idx));
 	if (err)
-		goto err;
+		return err;
 
 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
 			     &length, sizeof(length));
-	if (err)
-		goto err;
-
-	return 0;
-
-err:
-	rdma_user_mmap_entry_remove(&entry->rdma_entry);
 	return err;
 }
 
@@ -6327,26 +6316,20 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
 	mmap_offset = mlx5_entry_to_mmap_offset(entry);
 	length = entry->rdma_entry.npages * PAGE_SIZE;
 	uobj->object = entry;
+	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
 
 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
 			     &mmap_offset, sizeof(mmap_offset));
 	if (err)
-		goto err;
+		return err;
 
 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
 			     &entry->page_idx, sizeof(entry->page_idx));
 	if (err)
-		goto err;
+		return err;
 
 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
 			     &length, sizeof(length));
-	if (err)
-		goto err;
-
-	return 0;
-
-err:
-	rdma_user_mmap_entry_remove(&entry->rdma_entry);
 	return err;
 }
 
@@ -6540,6 +6523,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 	dev->ib_dev.phys_port_cnt = dev->num_ports;
 	dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
 	dev->ib_dev.dev.parent = mdev->device;
+	dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
 
 	mutex_init(&dev->cap_mask_mutex);
 	INIT_LIST_HEAD(&dev->qp_list);
@@ -6629,8 +6613,8 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
 	.modify_qp = mlx5_ib_modify_qp,
 	.modify_srq = mlx5_ib_modify_srq,
 	.poll_cq = mlx5_ib_poll_cq,
-	.post_recv = mlx5_ib_post_recv,
-	.post_send = mlx5_ib_post_send,
+	.post_recv = mlx5_ib_post_recv_nodrain,
+	.post_send = mlx5_ib_post_send_nodrain,
 	.post_srq_recv = mlx5_ib_post_srq_recv,
 	.process_mad = mlx5_ib_process_mad,
 	.query_ah = mlx5_ib_query_ah,
@@ -7131,6 +7115,8 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
 	int err;
 	int i;
 
+	dev->profile = profile;
+
 	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
 		if (profile->stage[i].init) {
 			err = profile->stage[i].init(dev);
@@ -7139,7 +7125,6 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
 		}
 	}
 
-	dev->profile = profile;
 	dev->ib_active = true;
 
 	return dev;
@@ -7313,8 +7298,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	int port_type_cap;
 	int num_ports;
 
-	printk_once(KERN_INFO "%s", mlx5_version);
-
 	if (MLX5_ESWITCH_MANAGER(mdev) &&
 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
 		if (!mlx5_core_mp_enabled(mdev))
@@ -337,7 +337,6 @@ struct mlx5_ib_rwq {
 	struct ib_umem *umem;
 	size_t buf_size;
 	unsigned int page_shift;
-	int create_type;
 	struct mlx5_db db;
 	u32 user_index;
 	u32 wqe_count;
@@ -346,17 +345,6 @@ struct mlx5_ib_rwq {
 	u32 create_flags; /* Use enum mlx5_ib_wq_flags */
 };
 
-enum {
-	MLX5_QP_USER,
-	MLX5_QP_KERNEL,
-	MLX5_QP_EMPTY
-};
-
-enum {
-	MLX5_WQ_USER,
-	MLX5_WQ_KERNEL
-};
-
 struct mlx5_ib_rwq_ind_table {
 	struct ib_rwq_ind_table ib_rwq_ind_tbl;
 	u32 rqtn;
@@ -443,34 +431,37 @@ struct mlx5_ib_qp {
 	/* serialize qp state modifications
 	 */
 	struct mutex mutex;
+	/* cached variant of create_flags from struct ib_qp_init_attr */
 	u32 flags;
 	u8 port;
 	u8 state;
-	int wq_sig;
-	int scat_cqe;
 	int max_inline_data;
 	struct mlx5_bf bf;
-	int has_rq;
+	u8 has_rq:1;
+	u8 is_rss:1;
 
 	/* only for user space QPs. For kernel
 	 * we have it from the bf object
 	 */
 	int bfregn;
 
-	int create_type;
-
 	struct list_head qps_list;
 	struct list_head cq_recv_list;
 	struct list_head cq_send_list;
 	struct mlx5_rate_limit rl;
 	u32 underlay_qpn;
 	u32 flags_en;
-	/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
-	enum ib_qp_type qp_sub_type;
+	/*
+	 * IB/core doesn't store low-level QP types, so
+	 * store both MLX and IBTA types in the field below.
+	 * IB_QPT_DRIVER will be break to DCI/DCT subtypes.
+	 */
+	enum ib_qp_type type;
 	/* A flag to indicate if there's a new counter is configured
 	 * but not take effective
 	 */
 	u32 counter_pending;
+	u16 gsi_lag_port;
 };
 
 struct mlx5_ib_cq_buf {
@@ -481,24 +472,6 @@ struct mlx5_ib_cq_buf {
 	int nent;
 };
 
-enum mlx5_ib_qp_flags {
-	MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
-	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
-	MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
-	MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
-	MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
-	MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
-	/* QP uses 1 as its source QP number */
-	MLX5_IB_QP_SQPN_QP1 = 1 << 6,
-	MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
-	MLX5_IB_QP_RSS = 1 << 8,
-	MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
-	MLX5_IB_QP_UNDERLAY = 1 << 10,
-	MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
-	MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
-	MLX5_IB_QP_PACKET_BASED_CREDIT = 1 << 13,
-};
-
 struct mlx5_umr_wr {
 	struct ib_send_wr wr;
 	u64 virt_addr;
@@ -702,12 +675,6 @@ struct umr_common {
 	struct semaphore sem;
 };
 
-enum {
-	MLX5_FMR_INVALID,
-	MLX5_FMR_VALID,
-	MLX5_FMR_BUSY,
-};
-
 struct mlx5_cache_ent {
 	struct list_head head;
 	/* sync access to the cahce entry
@@ -1181,7 +1148,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
-int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
 		      struct ib_udata *udata);
 int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
@@ -1205,10 +1172,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
 void mlx5_ib_drain_sq(struct ib_qp *qp);
 void mlx5_ib_drain_rq(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
-		      const struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
-		      const struct ib_recv_wr **bad_wr);
 int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
 			size_t buflen, size_t *bc);
 int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
@@ -1284,8 +1247,6 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
 			    struct ib_port_attr *props);
 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 		       struct ib_port_attr *props);
-int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 			unsigned long max_page_shift,
 			int *count, int *shift,
@@ -1383,8 +1344,8 @@ int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
 int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
 			u64 guid, int type);
 
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
-			       const struct ib_gid_attr *attr);
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+				   const struct ib_gid_attr *attr);
 
 void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
 void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
@@ -1581,4 +1542,11 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
 
 int mlx5_ib_enable_driver(struct ib_device *dev);
 int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
+
+static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
+{
+	return dev->lag_active ||
+		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
+		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
+}
 #endif /* MLX5_IB_H */
@@ -447,8 +447,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
 {
 	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
 		     pfault->wqe.wq_num : pfault->token;
-	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
-	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = { };
+	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
 	int err;
 
 	MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
@@ -457,7 +456,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
 	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
 	MLX5_SET(page_fault_resume_in, in, error, !!error);
 
-	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
 	if (err)
 		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
 			    wq_num, err);
@@ -1136,8 +1135,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 	if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
 		*wqe += sizeof(struct mlx5_wqe_xrc_seg);
 
-	if (qp->ibqp.qp_type == IB_QPT_UD ||
-	    qp->qp_sub_type == MLX5_IB_QPT_DCI) {
+	if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
 		av = *wqe;
 		if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
 			*wqe += sizeof(struct mlx5_av);
@@ -1190,7 +1188,7 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
 	struct mlx5_ib_wq *wq = &qp->rq;
 	int wqe_size = 1 << wq->wqe_shift;
 
-	if (qp->wq_sig) {
+	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
 		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
 		return -EFAULT;
 	}
@@ -69,17 +69,14 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
 	if (err)
 		goto err;
 
-	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
-			     &pp_entry->index, sizeof(pp_entry->index));
-	if (err)
-		goto clean;
-
 	pp_entry->mdev = dev->mdev;
 	uobj->object = pp_entry;
-	return 0;
+	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
+
+	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+			     &pp_entry->index, sizeof(pp_entry->index));
+	return err;
 
-clean:
-	mlx5_rl_remove_rate_raw(dev->mdev, pp_entry->index);
 err:
 	kfree(pp_entry);
 	return err;
[File diff suppressed because it is too large]
@@ -13,10 +13,10 @@ void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev);
 
 int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *qp,
 			 u32 *in, int inlen, u32 *out, int outlen);
-int mlx5_core_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
-			u32 *in, int inlen);
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+		       u32 *in, int inlen, u32 *out);
 int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
-			void *qpc, struct mlx5_core_qp *qp);
+			void *qpc, struct mlx5_core_qp *qp, u32 *ece);
 int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp);
 int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct);
 int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
@@ -236,16 +236,16 @@ err_cmd:
 	return err;
 }
 
-int mlx5_core_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
-			u32 *in, int inlen)
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+		       u32 *in, int inlen, u32 *out)
 {
-	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
 	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
 	int err;
 
 	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
 
-	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
+	err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
+			    MLX5_ST_SZ_BYTES(create_qp_out));
 	if (err)
 		return err;
 
@@ -341,9 +341,30 @@ static void mbox_free(struct mbox_info *mbox)
 	kfree(mbox->out);
 }
 
+static int get_ece_from_mbox(void *out, u16 opcode)
+{
+	int ece = 0;
+
+	switch (opcode) {
+	case MLX5_CMD_OP_INIT2RTR_QP:
+		ece = MLX5_GET(init2rtr_qp_out, out, ece);
+		break;
+	case MLX5_CMD_OP_RTR2RTS_QP:
+		ece = MLX5_GET(rtr2rts_qp_out, out, ece);
+		break;
+	case MLX5_CMD_OP_RTS2RTS_QP:
+		ece = MLX5_GET(rts2rts_qp_out, out, ece);
+		break;
+	default:
+		break;
+	}
+
+	return ece;
+}
+
 static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
 				u32 opt_param_mask, void *qpc,
-				struct mbox_info *mbox, u16 uid)
+				struct mbox_info *mbox, u16 uid, u32 ece)
 {
 	mbox->out = NULL;
 	mbox->in = NULL;
@@ -391,18 +412,21 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
 			return -ENOMEM;
 		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
 				  opt_param_mask, qpc, uid);
+		MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
 		break;
 	case MLX5_CMD_OP_RTR2RTS_QP:
 		if (MBOX_ALLOC(mbox, rtr2rts_qp))
 			return -ENOMEM;
 		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
 				  opt_param_mask, qpc, uid);
+		MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
 		break;
 	case MLX5_CMD_OP_RTS2RTS_QP:
 		if (MBOX_ALLOC(mbox, rts2rts_qp))
 			return -ENOMEM;
 		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
 				  opt_param_mask, qpc, uid);
+		MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
 		break;
 	case MLX5_CMD_OP_SQERR2RTS_QP:
 		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
@@ -423,18 +447,22 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
 }
 
 int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
-			void *qpc, struct mlx5_core_qp *qp)
+			void *qpc, struct mlx5_core_qp *qp, u32 *ece)
 {
 	struct mbox_info mbox;
 	int err;
 
-	err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn,
-				   opt_param_mask, qpc, &mbox, qp->uid);
+	err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
				   qpc, &mbox, qp->uid, (ece) ? *ece : 0);
 	if (err)
 		return err;
 
 	err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
 			    mbox.outlen);
+
+	if (ece)
+		*ece = get_ece_from_mbox(mbox.out, opcode);
+
 	mbox_free(&mbox);
 	return err;
 }
@@ -310,12 +310,18 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
 	srq->msrq.event = mlx5_ib_srq_event;
 	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
 
-	if (udata)
-		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
+	if (udata) {
+		struct mlx5_ib_create_srq_resp resp = {
+			.srqn = srq->msrq.srqn,
+		};
+
+		if (ib_copy_to_udata(udata, &resp, min(udata->outlen,
+				     sizeof(resp)))) {
 			mlx5_ib_dbg(dev, "copy to user failed\n");
 			err = -EFAULT;
 			goto err_core;
 		}
+	}
 
 	init_attr->attr.max_wr = srq->msrq.max - 1;
@@ -132,38 +132,33 @@ static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
|
||||
|
||||
static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
|
||||
{
|
||||
u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
|
||||
u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
|
||||
u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};
|
||||
|
||||
MLX5_SET(destroy_srq_in, srq_in, opcode,
|
||||
MLX5_CMD_OP_DESTROY_SRQ);
|
||||
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
|
||||
MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);
|
||||
MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
|
||||
MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
|
||||
MLX5_SET(destroy_srq_in, in, uid, srq->uid);
|
||||
|
||||
return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
|
||||
sizeof(srq_out));
|
||||
return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
|
||||
}
|
||||
|
||||
static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
	u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

	MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
	MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
	MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);
	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
			     sizeof(srq_out));
	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
	u32 *srq_out;
	void *srqc;
	int err;
@@ -172,11 +167,9 @@ static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
	if (!srq_out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, srq_in, opcode,
		 MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
	err = mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
			    MLX5_ST_SZ_BYTES(query_srq_out));
	MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, in, srqn, srq->srqn);
	err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
	if (err)
		goto out;

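The query commands keep a kvzalloc'ed output buffer because the query_*_out layouts are too large for the stack; only the input side moves to a stack array and mlx5_cmd_exec_inout(). A self-contained sketch of the resulting pattern (the function name is hypothetical):

/* Sketch only: query an SRQ with a heap-allocated out buffer. */
static int sketch_query_srq(struct mlx5_core_dev *mdev, u32 srqn)
{
	u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, in, srqn, srqn);
	err = mlx5_cmd_exec_inout(mdev, query_srq, in, out);

	kvfree(out);
	return err;
}
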
@@ -234,39 +227,35 @@ out:

static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			       struct mlx5_core_srq *srq)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
	u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};

	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
		 MLX5_CMD_OP_DESTROY_XRC_SRQ);
	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);
	MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
	MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
			     xrcsrq_out, sizeof(xrcsrq_out));
	return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
}

static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			   u16 lwm)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
	u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};

	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);
	MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
	MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
			     xrcsrq_out, sizeof(xrcsrq_out));
	return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
}

static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_srq_attr *out)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
	u32 *xrcsrq_out;
	void *xrc_srqc;
	int err;
@@ -274,14 +263,11 @@ static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
	xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
	if (!xrcsrq_out)
		return -ENOMEM;
	memset(xrcsrq_in, 0, sizeof(xrcsrq_in));

	MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
		 MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
	MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);

	err = mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
			    xrcsrq_out, MLX5_ST_SZ_BYTES(query_xrc_srq_out));
	err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
	if (err)
		goto out;

@@ -341,13 +327,12 @@ out:

static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
	u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};

	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
	MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
	MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
}

static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
@@ -384,7 +369,7 @@ static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);

	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
	err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);

out:
	kvfree(in);
@@ -414,7 +399,7 @@ static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,

	MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
	MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
	err = mlx5_cmd_exec(dev->mdev, rmp_in, inlen, rmp_out, outlen);
	err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
	if (err)
		goto out;

@@ -477,36 +462,34 @@ static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,

static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};

	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
	MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
	MLX5_SET(destroy_xrq_in, in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
}

static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
		       struct mlx5_core_srq *srq,
		       u16 lwm)
{
	u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

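Note that plain SRQs (arm_srq_cmd) and XRQs (arm_xrq_cmd) share the ARM_RQ opcode and differ only in op_mod; lwm sets the limit water mark that triggers the SRQ limit event. An illustrative helper capturing the distinction (sketch_fill_arm_rq is hypothetical):

/* Sketch only: one ARM_RQ filler, multiplexed on op_mod. */
static void sketch_fill_arm_rq(u32 *in, u32 number, u16 lwm, bool is_xrq)
{
	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, is_xrq ? MLX5_ARM_RQ_IN_OP_MOD_XRQ :
						 MLX5_ARM_RQ_IN_OP_MOD_SRQ);
	MLX5_SET(arm_rq_in, in, srq_number, number);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
}
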
static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
	u32 *xrq_out;
	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
	void *xrqc;
@@ -519,7 +502,7 @@ static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), xrq_out, outlen);
	err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
	if (err)
		goto out;

drivers/infiniband/hw/mlx5/wr.c (new file, 1504 lines; diff suppressed because it is too large)
drivers/infiniband/hw/mlx5/wr.h (new file, 76 lines)
@@ -0,0 +1,76 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#ifndef _MLX5_IB_WR_H
#define _MLX5_IB_WR_H

#include "mlx5_ib.h"

enum {
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is the first address past the end of the current fragment,
 * or past the end of the SQ itself. WQE construction repeatedly
 * advances a write pointer, so it must check whether that pointer has
 * reached an edge before writing further.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *	The new edge.
 */
static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
	void *fragment_end;

	fragment_end = mlx5_frag_buf_get_wqe
		(&sq->fbc,
		 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));

	return fragment_end + MLX5_SEND_WQE_BB;
}

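To make the edge check concrete, a hedged sketch of how a WQE writer can consume get_sq_edge(); the helper below is modeled on the driver's edge handling but is illustrative, not part of this header:

/* Illustrative only: wrap the write cursor when it hits the edge. */
static inline void sketch_handle_edge(struct mlx5_ib_wq *sq, void **seg,
				      u32 wqe_sz, void **cur_edge)
{
	u32 idx;

	if (*seg != *cur_edge)
		return;		/* still inside the current fragment */

	/* Jump to the next stride; the ring size is a power of two. */
	idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
	*cur_edge = get_sq_edge(sq, idx);
	*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}
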
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr, bool drain);

static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
					    const struct ib_send_wr *wr,
					    const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
					  const struct ib_send_wr *wr,
					  const struct ib_send_wr **bad_wr)
{
	return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
}

static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
					    const struct ib_recv_wr *wr,
					    const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
}

static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
					  const struct ib_recv_wr *wr,
					  const struct ib_recv_wr **bad_wr)
{
	return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
}
#endif /* _MLX5_IB_WR_H */
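
A hedged note on the drain flag: the four wrappers let the verbs callbacks and the drain path share one implementation, with only the drain variants allowed to post while the QP sits in the error state. A sketch of how the nodrain wrappers would be wired into the device ops (the ops-struct instance below is illustrative; the field names are the real ib_device_ops members):

/* Illustrative wiring only. */
static const struct ib_device_ops sketch_wr_ops = {
	.post_send = mlx5_ib_post_send_nodrain,	/* normal verbs path */
	.post_recv = mlx5_ib_post_recv_nodrain,
};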