Merge branch 'mlx' into merge-test

This commit is contained in:
Doug Ledford
2016-12-14 14:44:25 -05:00
Commit 86ef0beaa0
40 changed files with 876 additions and 337 deletions

View File

@@ -64,7 +64,9 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
return &ah->ibah;
}
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
struct ib_udata *udata)
{
struct mlx5_ib_ah *ah;
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -75,6 +77,27 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
if (ll == IB_LINK_LAYER_ETHERNET && udata) {
int err;
struct mlx5_ib_create_ah_resp resp = {};
u32 min_resp_len = offsetof(typeof(resp), dmac) +
sizeof(resp.dmac);
if (udata->outlen < min_resp_len)
return ERR_PTR(-EINVAL);
resp.response_length = min_resp_len;
err = ib_resolve_eth_dmac(pd->device, ah_attr);
if (err)
return ERR_PTR(err);
memcpy(resp.dmac, ah_attr->dmac, ETH_ALEN);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
return ERR_PTR(err);
}
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
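/*
 * Illustrative sketch, not part of the hunk above: the response layout that
 * the offsetof()/sizeof() arithmetic assumes.  Field names are taken from
 * the code; the exact trailing padding is an assumption.
 *
 *	struct mlx5_ib_create_ah_resp {
 *		__u32	response_length;	bytes actually written back
 *		__u8	dmac[ETH_ALEN];		resolved destination MAC
 *		__u8	reserved[6];		assumed pad to 8 bytes
 *	};
 *
 * With that layout, min_resp_len = offsetof(resp, dmac) + sizeof(resp.dmac)
 * = 4 + 6 = 10 bytes, so a user-space provider whose udata->outlen is smaller
 * is rejected with -EINVAL before any dmac resolution is attempted.
 */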

View File

@@ -731,7 +731,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int entries, u32 **cqb,
int *cqe_size, int *index, int *inlen)
{
struct mlx5_ib_create_cq ucmd;
struct mlx5_ib_create_cq ucmd = {};
size_t ucmdlen;
int page_shift;
__be64 *pas;
@@ -770,7 +770,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (err)
goto err_umem;
mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
&ncont, NULL);
mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
@@ -792,8 +792,36 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*index = to_mucontext(context)->uuari.uars[0].index;
if (ucmd.cqe_comp_en == 1) {
if (unlikely((*cqe_size != 64) ||
!MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
err = -EOPNOTSUPP;
mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
*cqe_size);
goto err_cqb;
}
if (unlikely(!ucmd.cqe_comp_res_format ||
!(ucmd.cqe_comp_res_format <
MLX5_IB_CQE_RES_RESERVED) ||
(ucmd.cqe_comp_res_format &
(ucmd.cqe_comp_res_format - 1)))) {
err = -EOPNOTSUPP;
mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
ucmd.cqe_comp_res_format);
goto err_cqb;
}
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
MLX5_SET(cqc, cqc, mini_cqe_res_format,
ilog2(ucmd.cqe_comp_res_format));
}
return 0;
err_cqb:
kfree(cqb);
err_db:
mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
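/*
 * Illustrative note, not part of the hunk above: the res-format check rejects
 * 0, anything at or above MLX5_IB_CQE_RES_RESERVED, and any value with more
 * than one bit set ((x & (x - 1)) != 0), i.e. exactly one supported-format
 * flag must be requested.  Assumed flag values, consistent with the ilog2()
 * conversion into mini_cqe_res_format and with the HASH/CSUM capabilities
 * reported by query_device:
 *
 *	MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0	->  mini_cqe_res_format 0
 *	MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1	->  mini_cqe_res_format 1
 */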
@@ -1125,7 +1153,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
return err;
}
mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
npas, NULL);
cq->resize_umem = umem;

View File

@@ -127,7 +127,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
&& ibdev->ib_active) {
struct ib_event ibev = {0};
struct ib_event ibev = { };
ibev.device = &ibdev->ib_dev;
ibev.event = (event == NETDEV_UP) ?
@@ -496,6 +496,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
int err = -ENOMEM;
int max_sq_desc;
int max_rq_sg;
int max_sq_sg;
u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
@@ -618,9 +619,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
sizeof(struct mlx5_wqe_data_seg);
max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
sizeof(struct mlx5_wqe_raddr_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
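/*
 * Quick arithmetic check of the new bound (16-byte ctrl, raddr and data
 * segment sizes are assumptions based on the mlx5 WQE layout, not taken from
 * this diff):
 *
 *	max_sq_desc = min(MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512)	e.g. 512
 *	max_sq_sg   = (512 - 16 - 16) / 16				= 30
 *
 * The previous formula neither subtracted the RADDR segment nor capped the
 * descriptor at 512 bytes, so it could advertise more send SGEs than an RC
 * WQE can actually carry.
 */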
@@ -643,6 +645,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
props->max_ah = INT_MAX;
props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
@@ -669,6 +672,40 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
}
if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
uhw->outlen)) {
resp.mlx5_ib_support_multi_pkt_send_wqes =
MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
resp.response_length +=
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
if (field_avail(typeof(resp), reserved, uhw->outlen))
resp.response_length += sizeof(resp.reserved);
if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
resp.cqe_comp_caps.max_num =
MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
resp.cqe_comp_caps.supported_format =
MLX5_IB_CQE_RES_FORMAT_HASH |
MLX5_IB_CQE_RES_FORMAT_CSUM;
resp.response_length += sizeof(resp.cqe_comp_caps);
}
if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
if (MLX5_CAP_QOS(mdev, packet_pacing) &&
MLX5_CAP_GEN(mdev, qos)) {
resp.packet_pacing_caps.qp_rate_limit_max =
MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
resp.packet_pacing_caps.qp_rate_limit_min =
MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
resp.packet_pacing_caps.supported_qpts |=
1 << IB_QPT_RAW_PACKET;
}
resp.response_length += sizeof(resp.packet_pacing_caps);
}
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
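/*
 * For reference: field_avail() tests whether the caller's response buffer is
 * large enough to hold a given field; a sketch of the idea (the macro itself
 * is defined elsewhere in this file, so its exact form here is an
 * assumption):
 *
 *	field_avail(type, fld, sz) :=
 *		offsetof(type, fld) + sizeof(((type *)0)->fld) <= (sz)
 *
 * Each new capability block is only filled in, and added to
 * resp.response_length, when uhw->outlen already covers it, which keeps the
 * extended query_device ABI backward compatible with older providers.
 */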
@@ -1093,7 +1130,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.cqe_version);
if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
resp.response_length += sizeof(resp.cmds_supp_uhw);
}
@@ -1502,6 +1540,22 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
}
static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
bool inner)
{
if (inner) {
MLX5_SET(fte_match_set_misc,
misc_c, inner_ipv6_flow_label, mask);
MLX5_SET(fte_match_set_misc,
misc_v, inner_ipv6_flow_label, val);
} else {
MLX5_SET(fte_match_set_misc,
misc_c, outer_ipv6_flow_label, mask);
MLX5_SET(fte_match_set_misc,
misc_v, outer_ipv6_flow_label, val);
}
}
static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
@@ -1515,6 +1569,7 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
#define LAST_TUNNEL_FIELD tunnel_id
/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
@@ -1527,155 +1582,164 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
static int parse_flow_attr(u32 *match_c, u32 *match_v,
const union ib_flow_spec *ib_spec)
{
void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers);
void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers);
void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
misc_parameters);
void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
misc_parameters);
void *headers_c;
void *headers_v;
switch (ib_spec->type) {
if (ib_spec->type & IB_FLOW_SPEC_INNER) {
headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
inner_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
inner_headers);
} else {
headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers);
}
switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
case IB_FLOW_SPEC_ETH:
if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
return -ENOTSUPP;
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
ib_spec->eth.mask.dst_mac);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
ib_spec->eth.val.dst_mac);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
ib_spec->eth.mask.src_mac);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
ib_spec->eth.val.src_mac);
if (ib_spec->eth.mask.vlan_tag) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
vlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
vlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
first_vid, ntohs(ib_spec->eth.val.vlan_tag));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_cfi,
ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
first_cfi,
ntohs(ib_spec->eth.val.vlan_tag) >> 12);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_prio,
ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
first_prio,
ntohs(ib_spec->eth.val.vlan_tag) >> 13);
}
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
ethertype, ntohs(ib_spec->eth.mask.ether_type));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
ethertype, ntohs(ib_spec->eth.val.ether_type));
break;
case IB_FLOW_SPEC_IPV4:
if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
ethertype, 0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
ethertype, ETH_P_IP);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.mask.src_ip,
sizeof(ib_spec->ipv4.mask.src_ip));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.val.src_ip,
sizeof(ib_spec->ipv4.val.src_ip));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.mask.dst_ip,
sizeof(ib_spec->ipv4.mask.dst_ip));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ib_spec->ipv4.val.dst_ip,
sizeof(ib_spec->ipv4.val.dst_ip));
set_tos(outer_headers_c, outer_headers_v,
set_tos(headers_c, headers_v,
ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
set_proto(outer_headers_c, outer_headers_v,
set_proto(headers_c, headers_v,
ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
break;
case IB_FLOW_SPEC_IPV6:
if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
ethertype, 0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
ethertype, ETH_P_IPV6);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.mask.src_ip,
sizeof(ib_spec->ipv6.mask.src_ip));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.val.src_ip,
sizeof(ib_spec->ipv6.val.src_ip));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.mask.dst_ip,
sizeof(ib_spec->ipv6.mask.dst_ip));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&ib_spec->ipv6.val.dst_ip,
sizeof(ib_spec->ipv6.val.dst_ip));
set_tos(outer_headers_c, outer_headers_v,
set_tos(headers_c, headers_v,
ib_spec->ipv6.mask.traffic_class,
ib_spec->ipv6.val.traffic_class);
set_proto(outer_headers_c, outer_headers_v,
set_proto(headers_c, headers_v,
ib_spec->ipv6.mask.next_hdr,
ib_spec->ipv6.val.next_hdr);
MLX5_SET(fte_match_set_misc, misc_params_c,
outer_ipv6_flow_label,
ntohl(ib_spec->ipv6.mask.flow_label));
MLX5_SET(fte_match_set_misc, misc_params_v,
outer_ipv6_flow_label,
ntohl(ib_spec->ipv6.val.flow_label));
set_flow_label(misc_params_c, misc_params_v,
ntohl(ib_spec->ipv6.mask.flow_label),
ntohl(ib_spec->ipv6.val.flow_label),
ib_spec->type & IB_FLOW_SPEC_INNER);
break;
case IB_FLOW_SPEC_TCP:
if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
LAST_TCP_UDP_FIELD))
return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
0xff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
IPPROTO_TCP);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
ntohs(ib_spec->tcp_udp.mask.src_port));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
ntohs(ib_spec->tcp_udp.val.src_port));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
ntohs(ib_spec->tcp_udp.mask.dst_port));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
ntohs(ib_spec->tcp_udp.val.dst_port));
break;
case IB_FLOW_SPEC_UDP:
@@ -1683,21 +1747,31 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
LAST_TCP_UDP_FIELD))
return -ENOTSUPP;
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
0xff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
IPPROTO_UDP);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
ntohs(ib_spec->tcp_udp.mask.src_port));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
ntohs(ib_spec->tcp_udp.val.src_port));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
ntohs(ib_spec->tcp_udp.mask.dst_port));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
ntohs(ib_spec->tcp_udp.val.dst_port));
break;
case IB_FLOW_SPEC_VXLAN_TUNNEL:
if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
LAST_TUNNEL_FIELD))
return -ENOTSUPP;
MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
ntohl(ib_spec->tunnel.mask.tunnel_id));
MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
ntohl(ib_spec->tunnel.val.tunnel_id));
break;
default:
return -EINVAL;
}
@@ -2718,6 +2792,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
int err;
err = mlx5_ib_query_port(ibdev, port_num, &attr);
@@ -2727,7 +2803,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = get_core_cap_flags(ibdev);
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
}
@@ -2741,7 +2818,7 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
@@ -2770,7 +2847,7 @@ err_destroy_vport_lag:
return err;
}
static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -2782,15 +2859,7 @@ static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
{
if (dev->roce.nb.notifier_call) {
unregister_netdevice_notifier(&dev->roce.nb);
dev->roce.nb.notifier_call = NULL;
}
}
static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
{
int err;
@@ -2801,28 +2870,51 @@ static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
return err;
}
err = mlx5_nic_vport_enable_roce(dev->mdev);
if (err)
goto err_unregister_netdevice_notifier;
return 0;
}
err = mlx5_roce_lag_init(dev);
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
{
if (dev->roce.nb.notifier_call) {
unregister_netdevice_notifier(&dev->roce.nb);
dev->roce.nb.notifier_call = NULL;
}
}
static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
{
int err;
err = mlx5_add_netdev_notifier(dev);
if (err)
return err;
if (MLX5_CAP_GEN(dev->mdev, roce)) {
err = mlx5_nic_vport_enable_roce(dev->mdev);
if (err)
goto err_unregister_netdevice_notifier;
}
err = mlx5_eth_lag_init(dev);
if (err)
goto err_disable_roce;
return 0;
err_disable_roce:
mlx5_nic_vport_disable_roce(dev->mdev);
if (MLX5_CAP_GEN(dev->mdev, roce))
mlx5_nic_vport_disable_roce(dev->mdev);
err_unregister_netdevice_notifier:
mlx5_remove_roce_notifier(dev);
mlx5_remove_netdev_notifier(dev);
return err;
}
static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
{
mlx5_roce_lag_cleanup(dev);
mlx5_nic_vport_disable_roce(dev->mdev);
mlx5_eth_lag_cleanup(dev);
if (MLX5_CAP_GEN(dev->mdev, roce))
mlx5_nic_vport_disable_roce(dev->mdev);
}
static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
@@ -2944,9 +3036,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
return NULL;
printk_once(KERN_INFO "%s", mlx5_version);
dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
@@ -2992,6 +3081,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
(1ull << IB_USER_VERBS_CMD_REREG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
@@ -3014,7 +3105,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.uverbs_ex_cmd_mask =
(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
@@ -3125,14 +3217,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
spin_lock_init(&dev->reset_flow_resource_lock);
if (ll == IB_LINK_LAYER_ETHERNET) {
err = mlx5_enable_roce(dev);
err = mlx5_enable_eth(dev);
if (err)
goto err_dealloc;
}
err = create_dev_resources(&dev->devr);
if (err)
goto err_disable_roce;
goto err_disable_eth;
err = mlx5_ib_odp_init_one(dev);
if (err)
@@ -3176,10 +3268,10 @@ err_odp:
err_rsrc:
destroy_dev_resources(&dev->devr);
err_disable_roce:
err_disable_eth:
if (ll == IB_LINK_LAYER_ETHERNET) {
mlx5_disable_roce(dev);
mlx5_remove_roce_notifier(dev);
mlx5_disable_eth(dev);
mlx5_remove_netdev_notifier(dev);
}
err_free_port:
@@ -3196,14 +3288,14 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_dev *dev = context;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
mlx5_remove_roce_notifier(dev);
mlx5_remove_netdev_notifier(dev);
ib_unregister_device(&dev->ib_dev);
mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev);
mlx5_ib_odp_remove_one(dev);
destroy_dev_resources(&dev->devr);
if (ll == IB_LINK_LAYER_ETHERNET)
mlx5_disable_roce(dev);
mlx5_disable_eth(dev);
kfree(dev->port);
ib_dealloc_device(&dev->ib_dev);
}

View File

@@ -37,12 +37,15 @@
/* @umem: umem object to scan
* @addr: ib virtual address requested by the user
* @max_page_shift: high limit for page_shift - 0 means no limit
* @count: number of PAGE_SIZE pages covered by umem
* @shift: page shift for the compound pages found in the region
* @ncont: number of compound pages
* @order: log2 of the number of compound pages
*/
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
unsigned long max_page_shift,
int *count, int *shift,
int *ncont, int *order)
{
unsigned long tmp;
@@ -72,6 +75,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
addr = addr >> page_shift;
tmp = (unsigned long)addr;
m = find_first_bit(&tmp, BITS_PER_LONG);
if (max_page_shift)
m = min_t(unsigned long, max_page_shift - page_shift, m);
skip = 1 << m;
mask = skip - 1;
i = 0;
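/*
 * Illustrative numbers, not part of the hunk above: with page_shift = 12
 * (4 KiB pages) and low address bits allowing m = 9, the region would be
 * described with 2 MiB compound pages.  A caller passing max_page_shift = 16
 * clamps m to 16 - 12 = 4, i.e. 64 KiB compound pages at most, while passing
 * 0 skips the clamp and preserves the old behaviour, which is what the
 * unchanged call sites in this series do.
 */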

View File

@@ -63,6 +63,8 @@ pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
enum {
MLX5_IB_MMAP_CMD_SHIFT = 8,
MLX5_IB_MMAP_CMD_MASK = 0xff,
@@ -387,6 +389,7 @@ struct mlx5_ib_qp {
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
u32 rate_limit;
};
struct mlx5_ib_cq_buf {
@@ -418,7 +421,7 @@ struct mlx5_umr_wr {
struct ib_pd *pd;
unsigned int page_shift;
unsigned int npages;
u32 length;
u64 length;
int access_flags;
u32 mkey;
};
@@ -737,7 +740,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
@@ -823,7 +827,9 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
unsigned long max_page_shift,
int *count, int *shift,
int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, size_t offset, size_t num_pages,

View File

@@ -627,7 +627,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->order = i + 2;
ent->dev = dev;
if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
(mlx5_core_is_pf(dev->mdev)))
limit = dev->mdev->profile->mr_cache[i].limit;
else
limit = 0;
@@ -645,6 +646,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return 0;
}
static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_cache_ent *ent;
int total = 0;
int i;
int j;
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
for (j = 0 ; j < 1000; j++) {
if (!ent->pending)
break;
msleep(50);
}
}
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
total += ent->pending;
}
if (total)
mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
else
mlx5_ib_warn(dev, "done with all pending requests\n");
}
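/*
 * Illustrative timing note: each cache entry is polled up to 1000 times with
 * msleep(50), so the cleanup path waits at most roughly 50 seconds per entry
 * for outstanding asynchronous MR-creation commands before warning that it
 * aborted with requests still pending.
 */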
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
int i;
@@ -658,6 +686,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
clean_keys(dev, i);
destroy_workqueue(dev->cache.wq);
wait_for_async_commands(dev);
del_timer_sync(&dev->delay_timer);
return 0;
@@ -815,29 +844,34 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
umrwr->mkey = key;
}
static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
int access_flags, int *npages,
int *page_shift, int *ncont, int *order)
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
int access_flags, struct ib_umem **umem,
int *npages, int *page_shift, int *ncont,
int *order)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
access_flags, 0);
if (IS_ERR(umem)) {
int err;
*umem = ib_umem_get(pd->uobject->context, start, length,
access_flags, 0);
err = PTR_ERR_OR_ZERO(*umem);
if (err < 0) {
mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
return (void *)umem;
return err;
}
mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
page_shift, ncont, order);
if (!*npages) {
mlx5_ib_warn(dev, "avoid zero region\n");
ib_umem_release(umem);
return ERR_PTR(-EINVAL);
ib_umem_release(*umem);
return -EINVAL;
}
mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
*npages, *ncont, *order, *page_shift);
return umem;
return 0;
}
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1163,11 +1197,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
umem = mr_umem_get(pd, start, length, access_flags, &npages,
err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
&page_shift, &ncont, &order);
if (IS_ERR(umem))
return (void *)umem;
if (err < 0)
return ERR_PTR(err);
if (use_umr(order)) {
mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
@@ -1341,10 +1375,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
*/
flags |= IB_MR_REREG_TRANS;
ib_umem_release(mr->umem);
mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
&page_shift, &ncont, &order);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
&npages, &page_shift, &ncont, &order);
if (err < 0) {
mr->umem = NULL;
return err;
}

View File

@@ -78,12 +78,14 @@ struct mlx5_wqe_eth_pad {
enum raw_qp_set_mask_map {
MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
};
struct mlx5_modify_raw_qp_param {
u16 operation;
u32 set_mask; /* raw_qp_set_mask_map */
u32 rate_limit;
u8 rq_q_ctr_id;
};
@@ -352,6 +354,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
{
int max_sge;
if (attr->qp_type == IB_QPT_RC)
max_sge = (min_t(int, wqe_size, 512) -
sizeof(struct mlx5_wqe_ctrl_seg) -
sizeof(struct mlx5_wqe_raddr_seg)) /
sizeof(struct mlx5_wqe_data_seg);
else if (attr->qp_type == IB_QPT_XRC_INI)
max_sge = (min_t(int, wqe_size, 512) -
sizeof(struct mlx5_wqe_ctrl_seg) -
sizeof(struct mlx5_wqe_xrc_seg) -
sizeof(struct mlx5_wqe_raddr_seg)) /
sizeof(struct mlx5_wqe_data_seg);
else
max_sge = (wqe_size - sq_overhead(attr)) /
sizeof(struct mlx5_wqe_data_seg);
return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
sizeof(struct mlx5_wqe_data_seg));
}
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
struct mlx5_ib_qp *qp)
{
@@ -382,13 +407,18 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
qp->sq.wqe_cnt,
1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
return -ENOMEM;
}
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
qp->sq.max_gs = attr->cap.max_send_sge;
qp->sq.max_gs = get_send_sge(attr, wqe_size);
if (qp->sq.max_gs < attr->cap.max_send_sge)
return -ENOMEM;
attr->cap.max_send_sge = qp->sq.max_gs;
qp->sq.max_post = wq_size / wqe_size;
attr->cap.max_send_wr = qp->sq.max_post;
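/*
 * Illustrative consequence of the change above (the same assumed 16-byte
 * ctrl/raddr/data segment sizes as in the query_device arithmetic): an RC QP
 * whose send WQE is capped at 512 bytes fits (512 - 16 - 16) / 16 = 30 data
 * segments, so a request for cap.max_send_sge = 32 now fails with -ENOMEM
 * instead of creating a QP that cannot post such a WQE, and smaller requests
 * get the achievable value written back into cap.max_send_sge.
 */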
@@ -648,7 +678,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
return PTR_ERR(*umem);
}
mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);
mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
if (err) {
@@ -701,7 +731,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return err;
}
mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
&ncont, NULL);
err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
&rwq->rq_page_offset);
@@ -2443,8 +2473,14 @@ out:
}
static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
struct mlx5_ib_sq *sq, int new_state)
struct mlx5_ib_sq *sq,
int new_state,
const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
u32 old_rate = ibqp->rate_limit;
u32 new_rate = old_rate;
u16 rl_index = 0;
void *in;
void *sqc;
int inlen;
@@ -2460,10 +2496,44 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
MLX5_SET(sqc, sqc, state, new_state);
err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
if (err)
goto out;
if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
if (new_state != MLX5_SQC_STATE_RDY)
pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
__func__);
else
new_rate = raw_qp_param->rate_limit;
}
if (old_rate != new_rate) {
if (new_rate) {
err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
if (err) {
pr_err("Failed configuring rate %u: %d\n",
new_rate, err);
goto out;
}
}
MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
}
err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
if (err) {
/* Remove new rate from table if failed */
if (new_rate &&
old_rate != new_rate)
mlx5_rl_remove_rate(dev, new_rate);
goto out;
}
/* Only remove the old rate after new rate was set */
if ((old_rate &&
(old_rate != new_rate)) ||
(new_state != MLX5_SQC_STATE_RDY))
mlx5_rl_remove_rate(dev, old_rate);
ibqp->rate_limit = new_rate;
sq->state = new_state;
out:
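/*
 * Minimal consumer sketch, not part of the hunk above: IB_QP_RATE_LIMIT and
 * ib_qp_attr.rate_limit are the verbs-layer hooks this series wires up, and
 * the accepted range follows the packet_pacing_caps reported by
 * query_device.
 */
#include <rdma/ib_verbs.h>

static int set_raw_qp_rate(struct ib_qp *qp, u32 rate)
{
	struct ib_qp_attr attr = {};

	attr.rate_limit = rate;	/* 0 disables pacing on this SQ */
	return ib_modify_qp(qp, &attr, IB_QP_RATE_LIMIT);
}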
@@ -2478,6 +2548,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
int modify_rq = !!qp->rq.wqe_cnt;
int modify_sq = !!qp->sq.wqe_cnt;
int rq_state;
int sq_state;
int err;
@@ -2495,10 +2567,18 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
rq_state = MLX5_RQC_STATE_RST;
sq_state = MLX5_SQC_STATE_RST;
break;
case MLX5_CMD_OP_INIT2INIT_QP:
case MLX5_CMD_OP_INIT2RTR_QP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
if (raw_qp_param->set_mask ==
MLX5_RAW_QP_RATE_LIMIT) {
modify_rq = 0;
sq_state = sq->state;
} else {
return raw_qp_param->set_mask ? -EINVAL : 0;
}
break;
case MLX5_CMD_OP_INIT2INIT_QP:
case MLX5_CMD_OP_INIT2RTR_QP:
if (raw_qp_param->set_mask)
return -EINVAL;
else
@@ -2508,13 +2588,13 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EINVAL;
}
if (qp->rq.wqe_cnt) {
err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
if (modify_rq) {
err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
if (err)
return err;
}
if (qp->sq.wqe_cnt) {
if (modify_sq) {
if (tx_affinity) {
err = modify_raw_packet_tx_affinity(dev->mdev, sq,
tx_affinity);
@@ -2522,7 +2602,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return err;
}
return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
}
return 0;
@@ -2578,7 +2658,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_port *mibport = NULL;
enum mlx5_qp_state mlx5_cur, mlx5_new;
enum mlx5_qp_optpar optpar;
int sqd_event;
int mlx5_st;
int err;
u16 op;
@@ -2725,12 +2804,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
context->db_rec_addr = cpu_to_be64(qp->db.dma);
if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
sqd_event = 1;
else
sqd_event = 0;
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
qp->port) - 1;
@@ -2777,6 +2850,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
}
if (attr_mask & IB_QP_RATE_LIMIT) {
raw_qp_param.rate_limit = attr->rate_limit;
raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
}
err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
} else {
err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
@@ -3068,10 +3147,10 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
{
memset(umr, 0, sizeof(*umr));
umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
umr->flags = 1 << 7;
umr->flags = MLX5_UMR_INLINE;
}
static __be64 get_umr_reg_mr_mask(void)
static __be64 get_umr_reg_mr_mask(int atomic)
{
u64 result;
@@ -3084,9 +3163,11 @@ static __be64 get_umr_reg_mr_mask(void)
MLX5_MKEY_MASK_KEY |
MLX5_MKEY_MASK_RR |
MLX5_MKEY_MASK_RW |
MLX5_MKEY_MASK_A |
MLX5_MKEY_MASK_FREE;
if (atomic)
result |= MLX5_MKEY_MASK_A;
return cpu_to_be64(result);
}
@@ -3147,7 +3228,7 @@ static __be64 get_umr_update_pd_mask(void)
}
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
struct ib_send_wr *wr)
struct ib_send_wr *wr, int atomic)
{
struct mlx5_umr_wr *umrwr = umr_wr(wr);
@@ -3172,7 +3253,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
umr->mkey_mask |= get_umr_update_pd_mask();
if (!umr->mkey_mask)
umr->mkey_mask = get_umr_reg_mr_mask();
umr->mkey_mask = get_umr_reg_mr_mask(atomic);
} else {
umr->mkey_mask = get_umr_unreg_mr_mask();
}
@@ -4025,7 +4106,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
set_reg_umr_segment(seg, wr);
set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((seg == qend)))

View File

@@ -118,7 +118,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
return err;
}
mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
&page_shift, &ncont, NULL);
err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
&offset);
@@ -280,6 +280,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
in.type = init_attr->srq_type;
if (pd->uobject)
err = create_srq_user(pd, srq, &in, udata, buf_size);
@@ -292,7 +293,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
in.type = init_attr->srq_type;
in.log_size = ilog2(srq->msrq.max);
in.wqe_shift = srq->msrq.wqe_shift - 4;
if (srq->wq_sig)