RDMA: Handle AH allocations by IB/core
Simplify drivers by ensuring the lifetime of the ib_ah object. The changes in .create_ah() go hand in hand with the corresponding update in .destroy_ah(). We take this opportunity to convert .destroy_ah() so that it cannot fail, as was suggested long ago, because there is nothing useful to do on a failure during destroy.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit d345691471
parent f6316032fd
committed by Jason Gunthorpe
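For orientation before the diff: the sketch below shows, in hedged form, what the converted driver callbacks look like after this change. The foo_* names and the foo_ah layout are invented for illustration; only ib_ah, rdma_ah_attr, ib_udata and the callback shapes come from the patch itself. The core allocates the container (sized via INIT_RDMA_OBJ_SIZE) and fills ib_ah->device/ib_ah->pd, so .create_ah() only initializes driver state and returns 0 or -errno, and .destroy_ah() becomes void because the core frees the memory afterwards.

    #include <rdma/ib_verbs.h>

    /* Hypothetical driver object: the ib_ah is embedded so the one
     * core-allocated buffer serves as both ib_ah and driver state.
     */
    struct foo_ah {
            struct ib_ah ibah;
            u32 port;               /* illustrative driver-private field */
    };

    static int foo_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
                             u32 flags, struct ib_udata *udata)
    {
            struct foo_ah *ah = container_of(ibah, struct foo_ah, ibah);

            /* no allocation here: the core already did it and set ibah->pd */
            ah->port = rdma_ah_get_port_num(attr);
            return 0;               /* on error return -errno; the core frees ibah */
    }

    static void foo_destroy_ah(struct ib_ah *ibah, u32 flags)
    {
            /* nothing to free: the core kfree()s the containing foo_ah */
    }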
@@ -40,13 +40,12 @@
 
 #include "mlx4_ib.h"
 
-static struct ib_ah *create_ib_ah(struct ib_pd *pd,
-                                  struct rdma_ah_attr *ah_attr,
-                                  struct mlx4_ib_ah *ah)
+static void create_ib_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
 {
-        struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+        struct mlx4_ib_ah *ah = to_mah(ib_ah);
+        struct mlx4_dev *dev = to_mdev(ib_ah->device)->dev;
 
-        ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
+        ah->av.ib.port_pd = cpu_to_be32(to_mpd(ib_ah->pd)->pdn |
                             (rdma_ah_get_port_num(ah_attr) << 24));
         ah->av.ib.g_slid  = rdma_ah_get_path_bits(ah_attr);
         ah->av.ib.sl_tclass_flowlabel =
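A note on the converted create_ib_ah() above: everything the old version took from the ib_pd argument is now derived from the core-allocated ib_ah, whose device and pd fields the core sets before calling the driver. The to_mah() accessor used here is the usual container_of() over the embedded ib_ah; roughly (paraphrased from the driver header):

    static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
    {
            return container_of(ibah, struct mlx4_ib_ah, ibah);
    }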
@@ -73,15 +72,12 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd,
                         --static_rate;
                 ah->av.ib.stat_rate = static_rate;
         }
-
-        return &ah->ibah;
 }
 
-static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
-                                    struct rdma_ah_attr *ah_attr,
-                                    struct mlx4_ib_ah *ah)
+static int create_iboe_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
 {
-        struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
+        struct mlx4_ib_dev *ibdev = to_mdev(ib_ah->device);
+        struct mlx4_ib_ah *ah = to_mah(ib_ah);
         const struct ib_gid_attr *gid_attr;
         struct mlx4_dev *dev = ibdev->dev;
         int is_mcast = 0;
@@ -108,7 +104,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
                 memcpy(ah->av.eth.s_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
                 ret = mlx4_ib_gid_index_to_real_index(ibdev, gid_attr);
                 if (ret < 0)
-                        return ERR_PTR(ret);
+                        return ret;
                 ah->av.eth.gid_index = ret;
         } else {
                 /* mlx4_ib_create_ah_slave fills in the s_mac and the vlan */
@@ -117,7 +113,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
 
         if (vlan_tag < 0x1000)
                 vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
-        ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
+        ah->av.eth.port_pd = cpu_to_be32(to_mpd(ib_ah->pd)->pdn |
                              (rdma_ah_get_port_num(ah_attr) << 24));
         ah->av.eth.vlan = cpu_to_be16(vlan_tag);
         ah->av.eth.hop_limit = grh->hop_limit;
@@ -140,63 +136,45 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
         memcpy(ah->av.eth.dgid, grh->dgid.raw, 16);
         ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(rdma_ah_get_sl(ah_attr)
                                                       << 29);
-        return &ah->ibah;
+        return 0;
 }
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
-                                u32 flags, struct ib_udata *udata)
+int mlx4_ib_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
+                      u32 flags, struct ib_udata *udata)
 
 {
-        struct mlx4_ib_ah *ah;
-        struct ib_ah *ret;
-
-        ah = kzalloc(sizeof *ah, GFP_ATOMIC);
-        if (!ah)
-                return ERR_PTR(-ENOMEM);
-
         if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
-                if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
-                        ret = ERR_PTR(-EINVAL);
-                } else {
-                        /*
-                         * TBD: need to handle the case when we get
-                         * called in an atomic context and there we
-                         * might sleep.  We don't expect this
-                         * currently since we're working with link
-                         * local addresses which we can translate
-                         * without going to sleep.
-                         */
-                        ret = create_iboe_ah(pd, ah_attr, ah);
-                }
+                if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+                        return -EINVAL;
+                /*
+                 * TBD: need to handle the case when we get
+                 * called in an atomic context and there we
+                 * might sleep.  We don't expect this
+                 * currently since we're working with link
+                 * local addresses which we can translate
+                 * without going to sleep.
+                 */
+                return create_iboe_ah(ib_ah, ah_attr);
+        }
 
-                if (IS_ERR(ret))
-                        kfree(ah);
-
-                return ret;
-        } else
-                return create_ib_ah(pd, ah_attr, ah); /* never fails */
+        create_ib_ah(ib_ah, ah_attr);
+        return 0;
 }
 
 /* AH's created via this call must be free'd by mlx4_ib_destroy_ah. */
-struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
-                                      struct rdma_ah_attr *ah_attr,
-                                      int slave_sgid_index, u8 *s_mac,
-                                      u16 vlan_tag)
+int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+                            int slave_sgid_index, u8 *s_mac, u16 vlan_tag)
 {
         struct rdma_ah_attr slave_attr = *ah_attr;
-        struct mlx4_ib_ah *mah;
-        struct ib_ah *ah;
+        struct mlx4_ib_ah *mah = to_mah(ah);
         int ret;
 
         slave_attr.grh.sgid_attr = NULL;
         slave_attr.grh.sgid_index = slave_sgid_index;
-        ah = mlx4_ib_create_ah(pd, &slave_attr, 0, NULL);
-        if (IS_ERR(ah))
-                return ah;
+        ret = mlx4_ib_create_ah(ah, &slave_attr, 0, NULL);
+        if (ret)
+                return ret;
 
-        ah->device = pd->device;
-        ah->pd = pd;
-        ah->type = ah_attr->type;
-        mah = to_mah(ah);
-
         /* get rid of force-loopback bit */
         mah->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
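The mlx4_ib_create_ah() rewrite above is where the commit message's point about object lifetime shows up: the kzalloc()/kfree()-on-error pair disappears from the driver because the core owns the allocation around the callback. A hedged paraphrase of the core-side sequence follows; the real rdma_create_ah() path also deals with GID references, user objects and usage counters, and example_core_create_ah() is not a real symbol.

    static struct ib_ah *example_core_create_ah(struct ib_pd *pd,
                                                struct rdma_ah_attr *ah_attr,
                                                u32 flags, struct ib_udata *udata)
    {
            struct ib_device *device = pd->device;
            struct ib_ah *ah;
            int ret;

            ah = rdma_zalloc_drv_obj(device, ib_ah);  /* sized for the driver's AH */
            if (!ah)
                    return ERR_PTR(-ENOMEM);

            ah->device = device;
            ah->pd = pd;
            ah->type = ah_attr->type;

            ret = device->ops.create_ah(ah, ah_attr, flags, udata);
            if (ret) {
                    kfree(ah);      /* the core, not the driver, unwinds */
                    return ERR_PTR(ret);
            }
            return ah;
    }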
@@ -208,7 +186,7 @@ struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
                 vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
         mah->av.eth.vlan = cpu_to_be16(vlan_tag);
 
-        return ah;
+        return 0;
 }
 
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
@@ -250,8 +228,7 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
         return 0;
 }
 
-int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
+void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
 {
-        kfree(to_mah(ah));
-        return 0;
+        return;
 }
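With the allocation owned by the core, mlx4_ib_destroy_ah() has nothing left to do, which is exactly why its return type can drop to void: there is no failure a caller could act on. The matching core-side teardown, again as a hedged sketch rather than a quote (the real destroy path also drops sgid_attr references; example_core_destroy_ah() is not a real symbol):

    static void example_core_destroy_ah(struct ib_ah *ah, u32 flags)
    {
            struct ib_pd *pd = ah->pd;

            ah->device->ops.destroy_ah(ah, flags);  /* can no longer fail */
            atomic_dec(&pd->usecnt);
            kfree(ah);                              /* core frees the driver container */
    }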
@@ -1371,9 +1371,9 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
         struct ib_ah *ah;
         struct ib_qp *send_qp = NULL;
         unsigned wire_tx_ix = 0;
-        int ret = 0;
         u16 wire_pkey_ix;
         int src_qpnum;
+        int ret;
 
         sqp_ctx = dev->sriov.sqps[port-1];
 
@@ -1393,12 +1393,20 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
         send_qp = sqp->qp;
 
-        /* create ah */
-        ah = mlx4_ib_create_ah_slave(sqp_ctx->pd, attr,
-                                     rdma_ah_retrieve_grh(attr)->sgid_index,
-                                     s_mac, vlan_id);
-        if (IS_ERR(ah))
+        ah = rdma_zalloc_drv_obj(sqp_ctx->pd->device, ib_ah);
+        if (!ah)
                 return -ENOMEM;
+
+        ah->device = sqp_ctx->pd->device;
+        ah->pd = sqp_ctx->pd;
+
+        /* create ah */
+        ret = mlx4_ib_create_ah_slave(ah, attr,
+                                      rdma_ah_retrieve_grh(attr)->sgid_index,
+                                      s_mac, vlan_id);
+        if (ret)
+                goto out;
+
         spin_lock(&sqp->tx_lock);
         if (sqp->tx_ix_head - sqp->tx_ix_tail >=
             (MLX4_NUM_TUNNEL_BUFS - 1))
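The mlx4_ib_send_to_wire() hunk above covers the one place where the driver builds an AH internally instead of going through the core verbs path, so it now mimics what the core would do: allocate the container with rdma_zalloc_drv_obj(), seed ah->device and ah->pd, call the create routine, and later plain kfree() the object. Condensed into a hypothetical helper (alloc_slave_ah() is not a real function, only a restatement of the hunk):

    static struct ib_ah *alloc_slave_ah(struct ib_pd *pd, struct rdma_ah_attr *attr,
                                        u8 *s_mac, u16 vlan_id)
    {
            struct ib_ah *ah;
            int ret;

            ah = rdma_zalloc_drv_obj(pd->device, ib_ah);  /* what the core would do */
            if (!ah)
                    return NULL;

            ah->device = pd->device;
            ah->pd = pd;

            ret = mlx4_ib_create_ah_slave(ah, attr,
                                          rdma_ah_retrieve_grh(attr)->sgid_index,
                                          s_mac, vlan_id);
            if (ret) {
                    kfree(ah);
                    return NULL;
            }
            return ah;
    }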
@@ -1410,8 +1418,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
                 goto out;
 
         sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
-        if (sqp->tx_ring[wire_tx_ix].ah)
-                mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0, NULL);
+        kfree(sqp->tx_ring[wire_tx_ix].ah);
         sqp->tx_ring[wire_tx_ix].ah = ah;
         ib_dma_sync_single_for_cpu(&dev->ib_dev,
                                    sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1450,7 +1457,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
         spin_unlock(&sqp->tx_lock);
         sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-        mlx4_ib_destroy_ah(ah, 0, NULL);
+        kfree(ah);
         return ret;
 }
 
@@ -1902,9 +1909,8 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
                 if (wc.status == IB_WC_SUCCESS) {
                         switch (wc.opcode) {
                         case IB_WC_SEND:
-                                mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
-                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah,
-                                                   0, NULL);
+                                kfree(sqp->tx_ring[wc.wr_id &
+                                      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
                                 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                                         = NULL;
                                 spin_lock(&sqp->tx_lock);
@@ -1932,9 +1938,8 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
                                  " status = %d, wrid = 0x%llx\n",
                                  ctx->slave, wc.status, wc.wr_id);
                         if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
-                                mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
-                                              (MLX4_NUM_TUNNEL_BUFS - 1)].ah,
-                                                   0, NULL);
+                                kfree(sqp->tx_ring[wc.wr_id &
+                                      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
                                 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
                                         = NULL;
                                 spin_lock(&sqp->tx_lock);
@@ -2558,6 +2558,8 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
         .req_notify_cq = mlx4_ib_arm_cq,
         .rereg_user_mr = mlx4_ib_rereg_user_mr,
         .resize_cq = mlx4_ib_resize_cq,
+
+        INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
         INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
         INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
 };
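The INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah) entry added to the ops table is what makes the core-side allocation possible: it records the size of the driver's container, so rdma_zalloc_drv_obj() allocates a whole mlx4_ib_ah rather than a bare ib_ah. Roughly, and leaving out the compile-time checks the real macros perform on the embedded member (paraphrased; see include/rdma/ib_verbs.h for the real definitions):

    #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
            .size_##ib_struct = sizeof(struct drv_struct)

    #define rdma_zalloc_drv_obj(ib_dev, ib_type) \
            ((struct ib_type *)kzalloc((ib_dev)->ops.size_##ib_type, GFP_KERNEL))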
@@ -752,14 +752,12 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
-                                u32 flags, struct ib_udata *udata);
-struct ib_ah *mlx4_ib_create_ah_slave(struct ib_pd *pd,
-                                      struct rdma_ah_attr *ah_attr,
-                                      int slave_sgid_index, u8 *s_mac,
-                                      u16 vlan_tag);
+int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+                      struct ib_udata *udata);
+int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+                            int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
-int mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
+void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags);
 
 struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
                                   struct ib_srq_init_attr *init_attr,