RDMA: Constify the argument of the work request conversion functions
When posting a send work request, the work request that is posted is not modified by any of the RDMA drivers. Make this explicit by constifying most ib_send_wr pointers in RDMA transport drivers.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
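The pattern behind the change is small: the conversion helpers only copy fields out of the work request into hardware descriptors and never write to the request itself, so the work-request parameter can take a const qualifier without touching the function bodies. Below is a minimal, self-contained sketch of that pattern; the struct and function names (example_ud_wr, example_set_datagram_seg, and so on) are illustrative stand-ins, not the actual ib_verbs.h or mlx4 definitions.

#include <stdint.h>

struct example_ud_wr {                 /* hypothetical, loosely modeled on ib_ud_wr */
	uint32_t remote_qpn;
	uint32_t remote_qkey;
};

struct example_datagram_seg {          /* hypothetical hardware descriptor */
	uint32_t dqpn;
	uint32_t qkey;
};

/* The helper only reads from 'wr', so the const qualifier costs nothing. */
static void example_set_datagram_seg(struct example_datagram_seg *dseg,
				     const struct example_ud_wr *wr)
{
	dseg->dqpn = wr->remote_qpn;
	dseg->qkey = wr->remote_qkey;
}

int main(void)
{
	const struct example_ud_wr wr = { .remote_qpn = 0x18, .remote_qkey = 0x11111111 };
	struct example_datagram_seg dseg;

	example_set_datagram_seg(&dseg, &wr);
	return dseg.dqpn == 0x18 ? 0 : 1;
}

Keeping the qualifier at every level lets the compiler, rather than convention, guarantee that a posted work request is never modified.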
@@ -2925,7 +2925,7 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
 }
 
 static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
-				  struct ib_ud_wr *wr,
+				  const struct ib_ud_wr *wr,
 				  void *wqe, unsigned *mlx_seg_len)
 {
 	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
@@ -3073,7 +3073,7 @@ static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num,
 }
 
 #define MLX4_ROCEV2_QP1_SPORT 0xC000
-static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
+static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
 			    void *wqe, unsigned *mlx_seg_len)
 {
 	struct ib_device *ib_dev = sqp->qp.ibqp.device;
@@ -3355,7 +3355,7 @@ static __be32 convert_access(int acc)
 }
 
 static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg,
-			struct ib_reg_wr *wr)
+			const struct ib_reg_wr *wr)
 {
 	struct mlx4_ib_mr *mr = to_mmr(wr->mr);
 
@@ -3385,7 +3385,7 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
 }
 
 static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
-			   struct ib_atomic_wr *wr)
+			   const struct ib_atomic_wr *wr)
 {
 	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
 		aseg->swap_add = cpu_to_be64(wr->swap);
@@ -3401,7 +3401,7 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
 }
 
 static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
-				  struct ib_atomic_wr *wr)
+				  const struct ib_atomic_wr *wr)
 {
 	aseg->swap_add = cpu_to_be64(wr->swap);
 	aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
@@ -3410,7 +3410,7 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
 }
 
 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
-			     struct ib_ud_wr *wr)
+			     const struct ib_ud_wr *wr)
 {
 	memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
 	dseg->dqpn = cpu_to_be32(wr->remote_qpn);
@@ -3421,7 +3421,7 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
 
 static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
 				    struct mlx4_wqe_datagram_seg *dseg,
-				    struct ib_ud_wr *wr,
+				    const struct ib_ud_wr *wr,
 				    enum mlx4_ib_qp_type qpt)
 {
 	union mlx4_ext_av *av = &to_mah(wr->ah)->av;
@@ -3443,7 +3443,8 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
 	dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
 }
 
-static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
+static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe,
+				unsigned *mlx_seg_len)
 {
 	struct mlx4_wqe_inline_seg *inl = wqe;
 	struct mlx4_ib_tunnel_header hdr;
@@ -3526,9 +3527,9 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
 	dseg->addr = cpu_to_be64(sg->addr);
 }
 
-static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
-			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
-			 __be32 *lso_hdr_sz, __be32 *blh)
+static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe,
+			 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
+			 unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh)
 {
 	unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);
 
@@ -3546,7 +3547,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
 	return 0;
 }
 
-static __be32 send_ieth(struct ib_send_wr *wr)
+static __be32 send_ieth(const struct ib_send_wr *wr)
 {
 	switch (wr->opcode) {
 	case IB_WR_SEND_WITH_IMM:
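For completeness, a second sketch (again with hypothetical names, not the actual mlx4 post-send path) shows why the qualifier has to propagate: the post-send entry point walks the caller's work-request chain read-only and hands each entry to helpers such as send_ieth(), so every helper along the way must accept a const pointer.

#include <stddef.h>
#include <stdint.h>

struct example_send_wr {               /* hypothetical, loosely modeled on ib_send_wr */
	struct example_send_wr *next;
	int opcode;                    /* 1 stands in for IB_WR_SEND_WITH_IMM */
	uint32_t imm_data;
};

/* Stand-in for a helper such as send_ieth(): a pure read of the WR. */
static uint32_t example_send_ieth(const struct example_send_wr *wr)
{
	return wr->opcode == 1 ? wr->imm_data : 0;
}

/* Stand-in post-send loop: the caller's WR chain is never written to. */
static int example_post_send(const struct example_send_wr *wr_list)
{
	const struct example_send_wr *wr;
	int posted = 0;

	for (wr = wr_list; wr; wr = wr->next) {
		uint32_t ieth = example_send_ieth(wr); /* would be copied into the WQE */
		(void)ieth;
		posted++;
	}
	return posted;
}

int main(void)
{
	struct example_send_wr second = { .next = NULL, .opcode = 0, .imm_data = 0 };
	struct example_send_wr first  = { .next = &second, .opcode = 1, .imm_data = 0xabcd };

	return example_post_send(&first) == 2 ? 0 : 1;
}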