Merge tag 'v5.3-rc8' into rdma.git for-next
To resolve dependencies in the following patches.

The mlx5_ib.h conflict was resolved by keeping both hunks.

Linux 5.3-rc8

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 		spin_unlock_irqrestore(&cmdq->lock, flags);
 		return -EBUSY;
 	}
+
+	size = req->cmd_size;
+	/* change the cmd_size to the number of 16byte cmdq unit.
+	 * req->cmd_size is modified here
+	 */
+	bnxt_qplib_set_cmd_slots(req);
+
 	memset(resp, 0, sizeof(*resp));
 	crsqe->resp = (struct creq_qp_event *)resp;
 	crsqe->resp->cookie = req->cookie;
@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 
 	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
 	preq = (u8 *)req;
-	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
 	do {
 		/* Locate the next cmdq slot */
 		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
@@ -55,9 +55,7 @@
 	do {								\
 		memset(&(req), 0, sizeof((req)));			\
 		(req).opcode = CMDQ_BASE_OPCODE_##CMD;			\
-		(req).cmd_size = (sizeof((req)) +			\
-				  BNXT_QPLIB_CMDQE_UNITS - 1) /		\
-				  BNXT_QPLIB_CMDQE_UNITS;		\
+		(req).cmd_size = sizeof((req));				\
 		(req).flags = cpu_to_le16(cmd_flags);			\
 	} while (0)
@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
 			 BNXT_QPLIB_CMDQE_UNITS);
 }
 
+/* Set the cmd_size to a factor of CMDQE unit */
+static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
+{
+	req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+			 BNXT_QPLIB_CMDQE_UNITS;
+}
+
 #define MAX_CMDQ_IDX(depth)		((depth) - 1)
 
 static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
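Side note on the two bnxt_re hunks above: RCFW_CMD_PREP() now records the raw byte size, and __send_message() captures that byte count for the copy before bnxt_qplib_set_cmd_slots() rounds cmd_size up to whole 16-byte cmdq slots. A minimal standalone sketch of the rounding (not driver code; only the unit size is taken from the header above):

#include <stdio.h>

#define BNXT_QPLIB_CMDQE_UNITS 16	/* one cmdq slot is 16 bytes */

/* Round a command's byte size up to whole cmdq slots, mirroring
 * bnxt_qplib_set_cmd_slots() above. */
static unsigned int cmd_slots(unsigned int cmd_size_bytes)
{
	return (cmd_size_bytes + BNXT_QPLIB_CMDQE_UNITS - 1) /
	       BNXT_QPLIB_CMDQE_UNITS;
}

int main(void)
{
	/* A 24-byte on-stack request needs 2 slots, but copying
	 * 2 * 16 = 32 bytes from it would read 8 bytes past its end.
	 * Hence the fix: keep the byte count for the copy, and use the
	 * slot count only for queue accounting. */
	printf("24 bytes -> %u slots\n", cmd_slots(24));	/* 2 */
	printf("32 bytes -> %u slots\n", cmd_slots(32));	/* 2 */
	return 0;
}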
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
 	if (!data)
 		return -ENOMEM;
 	copy = min(len, datalen - 1);
-	if (copy_from_user(data, buf, copy))
-		return -EFAULT;
+	if (copy_from_user(data, buf, copy)) {
+		ret = -EFAULT;
+		goto free_data;
+	}
 
 	ret = debugfs_file_get(file->f_path.dentry);
 	if (unlikely(ret))
-		return ret;
+		goto free_data;
 	ptr = data;
 	token = ptr;
 	for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
 	ret = len;
 
 	debugfs_file_put(file->f_path.dentry);
+free_data:
 	kfree(data);
 	return ret;
 }
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
 		return -ENOMEM;
 	ret = debugfs_file_get(file->f_path.dentry);
 	if (unlikely(ret))
-		return ret;
+		goto free_data;
 	bit = find_first_bit(fault->opcodes, bitsize);
 	while (bit < bitsize) {
 		zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
 	data[size - 1] = '\n';
 	data[size] = '\0';
 	ret = simple_read_from_buffer(buf, len, pos, data, size);
+free_data:
 	kfree(data);
 	return ret;
 }
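The four fault.c hunks above are a standard goto-cleanup conversion: every early return taken after the buffer is allocated is redirected to a single free_data: label so the allocation cannot leak. A minimal sketch of the pattern with hypothetical names (not the hfi1 code):

#include <stdlib.h>
#include <string.h>

/* Hypothetical parser illustrating the single-exit cleanup pattern:
 * every failure path after the allocation jumps to free_data, so the
 * buffer is released exactly once on all paths. */
static int parse_request(const char *src, size_t len)
{
	char *data;
	int ret = 0;

	data = malloc(len + 1);
	if (!data)
		return -1;	/* nothing allocated yet, plain return is fine */

	if (len == 0) {
		ret = -2;	/* a bare "return -2;" here would leak data */
		goto free_data;
	}

	memcpy(data, src, len);
	data[len] = '\0';
	/* ... use data ... */

free_data:
	free(data);
	return ret;
}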
@@ -2574,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
 	hfi1_kern_clear_hw_flow(priv->rcd, qp);
 }
 
-static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
-			     struct hfi1_packet *packet, u8 rcv_type,
-			     u8 opcode)
+static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
 {
 	struct rvt_qp *qp = packet->qp;
-	struct hfi1_qp_priv *qpriv = qp->priv;
-	u32 ipsn;
-	struct ib_other_headers *ohdr = packet->ohdr;
-	struct rvt_ack_entry *e;
-	struct tid_rdma_request *req;
-	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
-	u32 i;
 
 	if (rcv_type >= RHF_RCV_TYPE_IB)
 		goto done;
@@ -2602,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
 	if (rcv_type == RHF_RCV_TYPE_EAGER) {
 		hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
 		hfi1_schedule_send(qp);
-		goto done_unlock;
 	}
 
-	/*
-	 * For TID READ response, error out QP after freeing the tid
-	 * resources.
-	 */
-	if (opcode == TID_OP(READ_RESP)) {
-		ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
-		if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
-		    cmp_psn(ipsn, qp->s_psn) < 0) {
-			hfi1_kern_read_tid_flow_free(qp);
-			spin_unlock(&qp->s_lock);
-			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-			goto done;
-		}
-		goto done_unlock;
-	}
-
-	/*
-	 * Error out the qp for TID RDMA WRITE
-	 */
-	hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
-	for (i = 0; i < rvt_max_atomic(rdi); i++) {
-		e = &qp->s_ack_queue[i];
-		if (e->opcode == TID_OP(WRITE_REQ)) {
-			req = ack_to_tid_req(e);
-			hfi1_kern_exp_rcv_clear_all(req);
-		}
-	}
-	spin_unlock(&qp->s_lock);
-	rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
-	goto done;
-
-done_unlock:
 	/* Since no payload is delivered, just drop the packet */
 	spin_unlock(&qp->s_lock);
 done:
 	return true;
@@ -2687,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 	u32 fpsn;
 
 	lockdep_assert_held(&qp->r_lock);
+	spin_lock(&qp->s_lock);
 	/* If the psn is out of valid range, drop the packet */
 	if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
 	    cmp_psn(ibpsn, qp->s_psn) > 0)
-		return ret;
+		goto s_unlock;
 
-	spin_lock(&qp->s_lock);
 	/*
 	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
 	 * requests and implicitly NAK RDMA read and atomic requests issued
@@ -2740,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 
 			wqe = do_rc_completion(qp, wqe, ibp);
 			if (qp->s_acked == qp->s_tail)
-				break;
+				goto s_unlock;
 		}
+
+		if (qp->s_acked == qp->s_tail)
+			goto s_unlock;
+
 		/* Handle the eflags for the request */
 		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
 			goto s_unlock;
@@ -2922,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 		if (lnh == HFI1_LRH_GRH)
 			goto r_unlock;
 
-		if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
+		if (tid_rdma_tid_err(packet, rcv_type))
 			goto r_unlock;
 	}
 
@@ -2942,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
 	 */
 	spin_lock(&qp->s_lock);
 	qpriv = qp->priv;
+	if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
+	    qpriv->r_tid_tail == qpriv->r_tid_head)
+		goto unlock;
 	e = &qp->s_ack_queue[qpriv->r_tid_tail];
+	if (e->opcode != TID_OP(WRITE_REQ))
+		goto unlock;
 	req = ack_to_tid_req(e);
+	if (req->comp_seg == req->cur_seg)
+		goto unlock;
 	flow = &req->flows[req->clear_tail];
 	trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
 	trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
@@ -4509,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
 	struct rvt_swqe *wqe;
 	struct tid_rdma_request *req;
 	struct tid_rdma_flow *flow;
-	u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn;
+	u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
 	unsigned long flags;
 	u16 fidx;
 
@@ -4538,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
 		ack_kpsn--;
 	}
 
+	if (unlikely(qp->s_acked == qp->s_tail))
+		goto ack_op_err;
+
 	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
 
 	if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
@@ -4550,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
 	trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
 
 	/* Drop stale ACK/NAK */
-	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
+	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
+	    cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
 		goto ack_op_err;
 
 	while (cmp_psn(ack_kpsn,
@@ -4712,7 +4685,12 @@ done:
 		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
 			IB_AETH_CREDIT_MASK) {
 		case 0: /* PSN sequence error */
+			if (!req->flows)
+				break;
 			flow = &req->flows[req->acked_tail];
+			flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
+			if (cmp_psn(psn, flpsn) > 0)
+				break;
 			trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
 							flow);
 			req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
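Several of the guards added above (the stale ACK/NAK drop, the flpsn bound in the PSN-sequence-error case) rest on cmp_psn()-style circular comparison: PSNs wrap around, so ordering is decided by sign-extending the difference rather than comparing raw values. An illustrative sketch for a 24-bit sequence space (names and widths are assumptions, not the hfi1 implementation):

#include <stdint.h>
#include <stdio.h>

#define PSN_BITS	24
#define PSN_MASK	((1u << PSN_BITS) - 1)

/* Circular comparison: mask the difference to 24 bits, then sign-extend
 * it, so a value just past a wraparound still compares as "newer". */
static int psn_cmp(uint32_t a, uint32_t b)
{
	int32_t d = (int32_t)(((a - b) & PSN_MASK) << (32 - PSN_BITS));

	return d >> (32 - PSN_BITS);
}

int main(void)
{
	printf("%d\n", psn_cmp(5, 3) > 0);		/* 1: 5 is newer */
	printf("%d\n", psn_cmp(2, PSN_MASK - 1) > 0);	/* 1: newer across the wrap */
	return 0;
}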
@@ -1677,8 +1677,6 @@ tx_err:
 				    tx_buf_size, DMA_TO_DEVICE);
 		kfree(tun_qp->tx_ring[i].buf.addr);
 	}
-	kfree(tun_qp->tx_ring);
-	tun_qp->tx_ring = NULL;
 	i = MLX4_NUM_TUNNEL_BUFS;
 err:
 	while (i > 0) {
@@ -1687,6 +1685,8 @@ err:
 				    rx_buf_size, DMA_FROM_DEVICE);
 		kfree(tun_qp->ring[i].addr);
 	}
+	kfree(tun_qp->tx_ring);
+	tun_qp->tx_ring = NULL;
 	kfree(tun_qp->ring);
 	tun_qp->ring = NULL;
 	return -ENOMEM;
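In the two mlx4 hunks above, the tx_ring free moves out of the tx_err-only unwind and into the common err: tail, so the array is also released when the receive-side allocation fails after tx_ring was already allocated. A minimal sketch of the shape of that fix (hypothetical names):

#include <stdlib.h>

/* Hypothetical two-stage allocator: every failure after the first
 * allocation jumps to the unwind tail, which frees everything that
 * exists at that point, so no path can skip a free. */
static int alloc_rings(void **tx_ring, void **rx_ring)
{
	*tx_ring = malloc(64);
	if (!*tx_ring)
		return -1;

	*rx_ring = malloc(64);
	if (!*rx_ring)
		goto err;	/* tx_ring must be freed on this path too */

	return 0;

err:
	free(*tx_ring);
	*tx_ring = NULL;
	return -1;
}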
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
 
 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-		if (MLX5_CAP_GEN(mdev, pg))
+		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
 			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
 		props->odp_caps = dev->odp_caps;
 	}
@@ -6179,6 +6179,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
 	}
 
+	mlx5_ib_internal_fill_odp_caps(dev);
+
 	err = mlx5_ib_init_multiport_master(dev);
 	if (err)
 		return err;
@@ -6603,8 +6605,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
 
 static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
 {
-	mlx5_ib_internal_fill_odp_caps(dev);
-
 	return mlx5_ib_odp_init_one(dev);
 }
 
@@ -1477,4 +1477,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
 
 int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
 u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
+
+static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
+				       bool do_modify_atomic)
+{
+	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+		return false;
+
+	if (do_modify_atomic &&
+	    MLX5_CAP_GEN(dev->mdev, atomic) &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+		return false;
+
+	return true;
+}
 #endif /* MLX5_IB_H */
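The new mlx5_ib_can_use_umr() helper centralizes checks that were previously open-coded at each call site: UMR is off the table if the device disables entity-size modification, or if the caller needs an atomic-affecting modify on a device that supports atomics but disables UMR atomic modification. A sketch of the same decision over plain booleans (stand-ins for the MLX5_CAP_GEN() reads, not the mlx5 API):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the three MLX5_CAP_GEN() capability bits consulted. */
struct caps {
	bool umr_modify_entity_size_disabled;
	bool atomic;
	bool umr_modify_atomic_disabled;
};

/* Mirrors the decision logic of mlx5_ib_can_use_umr() above. */
static bool can_use_umr(const struct caps *c, bool do_modify_atomic)
{
	if (c->umr_modify_entity_size_disabled)
		return false;
	if (do_modify_atomic && c->atomic && c->umr_modify_atomic_disabled)
		return false;
	return true;
}

int main(void)
{
	/* Atomic-capable device that forbids UMR atomic modification:
	 * non-atomic updates may use UMR, atomic-affecting ones may not. */
	struct caps c = { false, true, true };

	printf("%d %d\n", can_use_umr(&c, false), can_use_umr(&c, true)); /* 1 0 */
	return 0;
}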
@@ -1311,9 +1311,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
-		  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
-		   !MLX5_CAP_GEN(dev->mdev, atomic));
+	use_umr = mlx5_ib_can_use_umr(dev, true);
 
 	if (order <= mr_cache_max_order(dev) && use_umr) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1466,7 +1464,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		goto err;
 	}
 
-	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
+	if (!mlx5_ib_can_use_umr(dev, true) ||
+	    (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
 		/*
 		 * UMR can't be used - MKey needs to be replaced.
 		 */
@@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 
 	memset(caps, 0, sizeof(*caps));
 
-	if (!MLX5_CAP_GEN(dev->mdev, pg))
+	if (!MLX5_CAP_GEN(dev->mdev, pg) ||
+	    !mlx5_ib_can_use_umr(dev, true))
 		return;
 
 	caps->general_caps = IB_ODP_SUPPORT;
@@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 
 	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
 	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
-	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+	    !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
 		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
 
 	return;
@@ -1549,8 +1551,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
 {
 	int ret = 0;
 
-	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
-		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
+	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
+		return ret;
+
+	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
 
 	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
 		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1560,9 +1564,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
 		}
 	}
 
-	if (!MLX5_CAP_GEN(dev->mdev, pg))
-		return ret;
-
 	ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
 
 	return ret;
@@ -1570,7 +1571,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
 
 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
 {
-	if (!MLX5_CAP_GEN(dev->mdev, pg))
+	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
 		return;
 
 	mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
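Worth noting in the odp.c hunks: init and cleanup now branch on the same derived predicate (IB_ODP_SUPPORT in odp_caps, filled in early during stage init) instead of mixing it with the raw pg capability, so setup and teardown cannot disagree about whether the page-fault EQ was created. A small sketch of that pairing (hypothetical names):

#include <stdbool.h>
#include <stdlib.h>

struct dev_state {
	bool odp_supported;	/* derived once, consulted by both paths */
	void *pf_eq;		/* must be destroyed iff it was created */
};

/* Create the page-fault EQ only when ODP is supported. */
static int odp_init_one(struct dev_state *d)
{
	if (!d->odp_supported)
		return 0;
	d->pf_eq = malloc(32);
	return d->pf_eq ? 0 : -1;
}

/* Gate teardown on the identical predicate, so free matches alloc. */
static void odp_cleanup_one(struct dev_state *d)
{
	if (!d->odp_supported)
		return;
	free(d->pf_eq);
	d->pf_eq = NULL;
}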
@@ -4161,7 +4161,7 @@ static u64 get_xlt_octo(u64 bytes)
 	       MLX5_IB_UMR_OCTOWORD;
 }
 
-static __be64 frwr_mkey_mask(void)
+static __be64 frwr_mkey_mask(bool atomic)
 {
 	u64 result;
 
@@ -4174,10 +4174,12 @@ static __be64 frwr_mkey_mask(void)
 		 MLX5_MKEY_MASK_LW		|
 		 MLX5_MKEY_MASK_RR		|
 		 MLX5_MKEY_MASK_RW		|
-		 MLX5_MKEY_MASK_A		|
 		 MLX5_MKEY_MASK_SMALL_FENCE	|
 		 MLX5_MKEY_MASK_FREE;
 
+	if (atomic)
+		result |= MLX5_MKEY_MASK_A;
+
 	return cpu_to_be64(result);
 }
 
@@ -4203,7 +4205,7 @@ static __be64 sig_mkey_mask(void)
 }
 
 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
-			    struct mlx5_ib_mr *mr, u8 flags)
+			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
 {
 	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
 
@@ -4211,7 +4213,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
 
 	umr->flags = flags;
 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
-	umr->mkey_mask = frwr_mkey_mask();
+	umr->mkey_mask = frwr_mkey_mask(atomic);
 }
 
 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
@@ -4810,10 +4812,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 {
 	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
 	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
 	int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
 	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
 	u8 flags = 0;
 
+	if (!mlx5_ib_can_use_umr(dev, atomic)) {
+		mlx5_ib_warn(to_mdev(qp->ibqp.device),
+			     "Fast update of %s for MR is disabled\n",
+			     (MLX5_CAP_GEN(dev->mdev,
+					   umr_modify_entity_size_disabled)) ?
+				     "entity size" :
+				     "atomic access");
+		return -EINVAL;
+	}
+
 	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
 		mlx5_ib_warn(to_mdev(qp->ibqp.device),
 			     "Invalid IB_SEND_INLINE send flag\n");
@@ -4825,7 +4839,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 	if (umr_inline)
 		flags |= MLX5_UMR_INLINE;
 
-	set_reg_umr_seg(*seg, mr, flags);
+	set_reg_umr_seg(*seg, mr, flags, atomic);
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
 	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);