Merge tag 'v4.18' into rdma.git for-next
Resolve merge conflicts from the -rc cycle against the rdma.git tree:

Conflicts:
 drivers/infiniband/core/uverbs_cmd.c
  - New ifs added to ib_uverbs_ex_create_flow in -rc and for-next
  - Merge removal of file->ucontext in for-next with new code in -rc
 drivers/infiniband/core/uverbs_main.c
  - for-next removed code from ib_uverbs_write() that was modified in
    for-rc

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
 {
         struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
 
-        if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+        if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
                 return -ENOMEM;
 
         mhp->mpl[mhp->mpl_len++] = addr;
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 
         lockdep_assert_held(&qp->s_lock);
         ps->s_txreq = get_txreq(ps->dev, qp);
-        if (IS_ERR(ps->s_txreq))
+        if (!ps->s_txreq)
                 goto bail_no_tx;
 
         if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
         int middle = 0;
 
         ps->s_txreq = get_txreq(ps->dev, qp);
-        if (IS_ERR(ps->s_txreq))
+        if (!ps->s_txreq)
                 goto bail_no_tx;
 
         if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
         u32 lid;
 
         ps->s_txreq = get_txreq(ps->dev, qp);
-        if (IS_ERR(ps->s_txreq))
+        if (!ps->s_txreq)
                 goto bail_no_tx;
 
         if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 - 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                 struct rvt_qp *qp)
         __must_hold(&qp->s_lock)
 {
-        struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+        struct verbs_txreq *tx = NULL;
 
         write_seqlock(&dev->txwait_lock);
         if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
         if (unlikely(!tx)) {
                 /* call slow path to get the lock */
                 tx = __get_txreq(dev, qp);
-                if (IS_ERR(tx))
+                if (!tx)
                         return tx;
         }
         tx->qp = qp;
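The hfi1 hunks above all track a single -rc change: __get_txreq()/get_txreq() stopped encoding errors in the returned pointer (ERR_PTR(-EBUSY)) and now return NULL on failure, so every caller's IS_ERR() test becomes a plain NULL check. As a standalone sketch of the two conventions (the ERR_PTR()/IS_ERR()/PTR_ERR() helpers below are modeled on include/linux/err.h; the alloc_buf_*() functions are hypothetical stand-ins for the real txreq allocator):

  /*
   * Standalone sketch, not kernel code: ERR_PTR()/IS_ERR()/PTR_ERR() are
   * modeled on include/linux/err.h, and the alloc_buf_*() helpers are
   * invented for illustration.
   */
  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>

  #define MAX_ERRNO 4095

  static inline void *ERR_PTR(long error) { return (void *)error; }
  static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
  static inline int IS_ERR(const void *ptr)
  {
          return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
  }

  /* Old convention: encode the errno inside the returned pointer. */
  static void *alloc_buf_errptr(size_t n)
  {
          void *p = malloc(n);

          return p ? p : ERR_PTR(-ENOMEM);
  }

  /* New convention (what the hunks switch to): plain NULL on failure. */
  static void *alloc_buf_null(size_t n)
  {
          return malloc(n);
  }

  int main(void)
  {
          void *a = alloc_buf_errptr(64);
          void *b = alloc_buf_null(64);

          if (IS_ERR(a))                  /* caller must remember IS_ERR() */
                  return (int)-PTR_ERR(a);
          if (!b)                         /* caller just tests for NULL */
                  return ENOMEM;

          puts("both allocations succeeded");
          free(a);
          free(b);
          return 0;
  }

Mixing the two conventions up is exactly the bug fixed in the flow_counters_set_data() hunk further down: mlx5_fc_create() returns an ERR_PTR-encoded pointer, so a plain NULL test silently misses failures.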
@@ -486,8 +486,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
         }
 
         if (flags & IB_MR_REREG_ACCESS) {
-                if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
-                        return -EPERM;
+                if (ib_access_writable(mr_access_flags) &&
+                    !mmr->umem->writable) {
+                        err = -EPERM;
+                        goto release_mpt_entry;
+                }
 
                 err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                                convert_access(mr_access_flags));
@@ -3208,8 +3208,8 @@ static int flow_counters_set_data(struct ib_counters *ibcounters,
         if (!mcounters->hw_cntrs_hndl) {
                 mcounters->hw_cntrs_hndl = mlx5_fc_create(
                         to_mdev(ibcounters->device)->mdev, false);
-                if (!mcounters->hw_cntrs_hndl) {
-                        ret = -ENOMEM;
+                if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+                        ret = PTR_ERR(mcounters->hw_cntrs_hndl);
                         goto free;
                 }
                 hw_hndl = true;
@@ -3556,29 +3556,35 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                         return ERR_PTR(-ENOMEM);
 
                 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
-                if (err) {
-                        kfree(ucmd);
-                        return ERR_PTR(err);
-                }
+                if (err)
+                        goto free_ucmd;
         }
 
-        if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
-                return ERR_PTR(-ENOMEM);
+        if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+                err = -ENOMEM;
+                goto free_ucmd;
+        }
 
         if (domain != IB_FLOW_DOMAIN_USER ||
             flow_attr->port > dev->num_ports ||
             (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
-                                  IB_FLOW_ATTR_FLAGS_EGRESS)))
-                return ERR_PTR(-EINVAL);
+                                  IB_FLOW_ATTR_FLAGS_EGRESS))) {
+                err = -EINVAL;
+                goto free_ucmd;
+        }
 
         if (is_egress &&
             (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
-             flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT))
-                return ERR_PTR(-EINVAL);
+             flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+                err = -EINVAL;
+                goto free_ucmd;
+        }
 
         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
-        if (!dst)
-                return ERR_PTR(-ENOMEM);
+        if (!dst) {
+                err = -ENOMEM;
+                goto free_ucmd;
+        }
 
         mutex_lock(&dev->flow_db->lock);
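The mlx4_ib_rereg_user_mr() and mlx5_ib_create_flow() hunks (and the srq.c hunk below) share one shape: early returns in a function that has already allocated resources are rewritten as gotos to a single cleanup path, so ucmd, dst, and friends are freed exactly once on every exit. A minimal sketch of the goto-unwind idiom, with an invented widget_init() standing in for the driver code:

  #include <errno.h>
  #include <stdlib.h>

  struct widget {
          int *data;
  };

  /* Hypothetical example of the goto-unwind idiom; not driver code. */
  static int widget_init(struct widget *w, int n)
  {
          int *scratch;
          int err = 0;

          scratch = malloc(16 * sizeof(*scratch));
          if (!scratch)
                  return -ENOMEM;         /* nothing to unwind yet */

          if (n <= 0) {
                  err = -EINVAL;          /* an early return here would leak scratch */
                  goto free_scratch;
          }

          w->data = calloc(n, sizeof(*w->data));
          if (!w->data) {
                  err = -ENOMEM;
                  goto free_scratch;
          }

  free_scratch:
          free(scratch);                  /* single cleanup path, success or failure */
          return err;
  }

  int main(void)
  {
          struct widget w = { 0 };
          int err = widget_init(&w, 4);

          free(w.data);
          return err ? EXIT_FAILURE : EXIT_SUCCESS;
  }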
@@ -3647,8 +3653,8 @@ destroy_ft:
 unlock:
         mutex_unlock(&dev->flow_db->lock);
         kfree(dst);
+free_ucmd:
         kfree(ucmd);
-        kfree(handler);
         return ERR_PTR(err);
 }
 
@@ -6343,7 +6349,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
         dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
                              MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-        if (MLX5_VPORT_MANAGER(mdev) &&
+        if (MLX5_ESWITCH_MANAGER(mdev) &&
             mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
                 dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
 
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
         desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
                     srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
-        if (desc_size == 0 || srq->msrq.max_gs > desc_size)
-                return ERR_PTR(-EINVAL);
+        if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+                err = -EINVAL;
+                goto err_srq;
+        }
         desc_size = roundup_pow_of_two(desc_size);
         desc_size = max_t(size_t, 32, desc_size);
-        if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
-                return ERR_PTR(-EINVAL);
+        if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+                err = -EINVAL;
+                goto err_srq;
+        }
         srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
                 sizeof(struct mlx5_wqe_data_seg);
         srq->msrq.wqe_shift = ilog2(desc_size);
         buf_size = srq->msrq.max * desc_size;
-        if (buf_size < desc_size)
-                return ERR_PTR(-EINVAL);
+        if (buf_size < desc_size) {
+                err = -EINVAL;
+                goto err_srq;
+        }
         in.type = init_attr->srq_type;
 
         if (pd->uobject)
@@ -2288,6 +2288,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         }
 
         if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+                if (rdma_protocol_iwarp(&dev->ibdev, 1))
+                        return -EINVAL;
+
                 if (attr_mask & IB_QP_PATH_MTU) {
                         if (attr->path_mtu < IB_MTU_256 ||
                             attr->path_mtu > IB_MTU_4096) {
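The qedr hunk adds two guards before the MTU is consumed: IB_QP_AV/IB_QP_PATH_MTU modifications are rejected outright on iWARP QPs, and the user-supplied path_mtu must fall inside the valid enum range. A small sketch of the range check, using the enum ib_mtu values from rdma/ib_verbs.h and a hypothetical path_mtu_valid() helper:

  #include <stdbool.h>

  /* Values as defined for enum ib_mtu in rdma/ib_verbs.h. */
  enum ib_mtu {
          IB_MTU_256  = 1,
          IB_MTU_512  = 2,
          IB_MTU_1024 = 3,
          IB_MTU_2048 = 4,
          IB_MTU_4096 = 5,
  };

  /* Hypothetical helper mirroring the added check: path_mtu arrives from
   * userspace, so anything outside [IB_MTU_256, IB_MTU_4096] is rejected
   * before it is translated into a wire MTU. */
  static bool path_mtu_valid(enum ib_mtu mtu)
  {
          return mtu >= IB_MTU_256 && mtu <= IB_MTU_4096;
  }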