RDMA: Mark if destroy address handle is in a sleepable context
Introduce a 'flags' field to the destroy address handle callback and add a flag that marks whether the callback is executed in an atomic context or not. This will allow drivers to wait for completion instead of polling for it when it is allowed.

Signed-off-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:

committed by
Jason Gunthorpe

parent
b090c4e3a0
commit
2553ba217e
@@ -208,7 +208,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
|
||||
|
||||
spin_lock_irqsave(&dev->sm_lock, flags);
|
||||
if (dev->sm_ah[port_num - 1])
|
||||
rdma_destroy_ah(dev->sm_ah[port_num - 1]);
|
||||
rdma_destroy_ah(dev->sm_ah[port_num - 1], 0);
|
||||
dev->sm_ah[port_num - 1] = new_ah;
|
||||
spin_unlock_irqrestore(&dev->sm_lock, flags);
|
||||
}
|
||||
@@ -584,7 +584,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
|
||||
|
||||
tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
|
||||
if (tun_qp->tx_ring[tun_tx_ix].ah)
|
||||
rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
|
||||
rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0);
|
||||
tun_qp->tx_ring[tun_tx_ix].ah = ah;
|
||||
ib_dma_sync_single_for_cpu(&dev->ib_dev,
|
||||
tun_qp->tx_ring[tun_tx_ix].buf.map,
|
||||
@@ -657,7 +657,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
|
||||
spin_unlock(&tun_qp->tx_lock);
|
||||
tun_qp->tx_ring[tun_tx_ix].ah = NULL;
|
||||
end:
|
||||
rdma_destroy_ah(ah);
|
||||
rdma_destroy_ah(ah, 0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1024,7 +1024,7 @@ static void send_handler(struct ib_mad_agent *agent,
|
||||
struct ib_mad_send_wc *mad_send_wc)
|
||||
{
|
||||
if (mad_send_wc->send_buf->context[0])
|
||||
rdma_destroy_ah(mad_send_wc->send_buf->context[0]);
|
||||
rdma_destroy_ah(mad_send_wc->send_buf->context[0], 0);
|
||||
ib_free_send_mad(mad_send_wc->send_buf);
|
||||
}
|
||||
|
||||
@@ -1079,7 +1079,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
|
||||
}
|
||||
|
||||
if (dev->sm_ah[p])
|
||||
rdma_destroy_ah(dev->sm_ah[p]);
|
||||
rdma_destroy_ah(dev->sm_ah[p], 0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
|
||||
|
||||
sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
|
||||
if (sqp->tx_ring[wire_tx_ix].ah)
|
||||
rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
|
||||
rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
|
||||
sqp->tx_ring[wire_tx_ix].ah = ah;
|
||||
ib_dma_sync_single_for_cpu(&dev->ib_dev,
|
||||
sqp->tx_ring[wire_tx_ix].buf.map,
|
||||
@@ -1450,7 +1450,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
|
||||
spin_unlock(&sqp->tx_lock);
|
||||
sqp->tx_ring[wire_tx_ix].ah = NULL;
|
||||
out:
|
||||
mlx4_ib_destroy_ah(ah);
|
||||
mlx4_ib_destroy_ah(ah, 0);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1716,7 +1716,7 @@ static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
|
||||
tx_buf_size, DMA_TO_DEVICE);
|
||||
kfree(tun_qp->tx_ring[i].buf.addr);
|
||||
if (tun_qp->tx_ring[i].ah)
|
||||
rdma_destroy_ah(tun_qp->tx_ring[i].ah);
|
||||
rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0);
|
||||
}
|
||||
kfree(tun_qp->tx_ring);
|
||||
kfree(tun_qp->ring);
|
||||
@@ -1749,7 +1749,7 @@ static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
|
||||
"wrid=0x%llx, status=0x%x\n",
|
||||
wc.wr_id, wc.status);
|
||||
rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
|
||||
(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
|
||||
(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
|
||||
tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
|
||||
= NULL;
|
||||
spin_lock(&tun_qp->tx_lock);
|
||||
@@ -1766,7 +1766,7 @@ static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
|
||||
ctx->slave, wc.status, wc.wr_id);
|
||||
if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
|
||||
rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
|
||||
(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
|
||||
(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
|
||||
tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
|
||||
= NULL;
|
||||
spin_lock(&tun_qp->tx_lock);
|
||||
@@ -1903,7 +1903,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
|
||||
switch (wc.opcode) {
|
||||
case IB_WC_SEND:
|
||||
rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
|
||||
(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
|
||||
(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
|
||||
sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
|
||||
= NULL;
|
||||
spin_lock(&sqp->tx_lock);
|
||||
@@ -1932,7 +1932,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
|
||||
ctx->slave, wc.status, wc.wr_id);
|
||||
if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
|
||||
rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
|
||||
(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
|
||||
(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
|
||||
sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
|
||||
= NULL;
|
||||
spin_lock(&sqp->tx_lock);
|
||||
|
Reference in New Issue
Block a user