Merge branch 'wip/dl-for-rc' into wip/dl-for-next
The fix for IB port statistics initialization ("IB/core: Fix querying
total rdma stats") is needed before we take a follow-on patch to
for-next.

Signed-off-by: Doug Ledford <dledford@redhat.com>
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -481,6 +481,7 @@ struct mlx5_umr_wr {
 	u64				length;
 	int				access_flags;
 	u32				mkey;
+	u8				ignore_free_state:1;
 };
 
 static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
@@ -545,14 +545,17 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		return;
 
 	c = order2idx(dev, mr->order);
-	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
-		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
+	WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
+
+	if (unreg_umr(dev, mr)) {
+		mr->allocated_from_cache = false;
+		destroy_mkey(dev, mr);
+		ent = &cache->ent[c];
+		if (ent->cur < ent->limit)
+			queue_work(cache->wq, &ent->work);
 		return;
 	}
 
-	if (unreg_umr(dev, mr))
-		return;
-
 	ent = &cache->ent[c];
 	spin_lock_irq(&ent->lock);
 	list_add_tail(&mr->list, &ent->head);
@@ -1373,9 +1376,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		return 0;
 
 	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
-			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+			      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
 	umrwr.wr.opcode = MLX5_IB_WR_UMR;
+	umrwr.pd = dev->umrc.pd;
 	umrwr.mkey = mr->mmkey.key;
+	umrwr.ignore_free_state = 1;
 
 	return mlx5_ib_post_send_wait(dev, &umrwr);
 }
@@ -1577,10 +1582,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		mr->sig = NULL;
 	}
 
-	mlx5_free_priv_descs(mr);
-
-	if (!allocated_from_cache)
+	if (!allocated_from_cache) {
 		destroy_mkey(dev, mr);
+		mlx5_free_priv_descs(mr);
+	}
 }
 
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
@@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
 	 * overwrite the same MTTs. Concurent invalidations might race us,
 	 * but they will write 0s as well, so no difference in the end result.
 	 */
-
+	mutex_lock(&umem_odp->umem_mutex);
 	for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
 		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
 		/*
@@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
 					idx - blk_start_idx + 1, 0,
 					MLX5_IB_UPD_XLT_ZAP |
 					MLX5_IB_UPD_XLT_ATOMIC);
+	mutex_unlock(&umem_odp->umem_mutex);
 	/*
 	 * We are now sure that the device will not access the
 	 * memory. We can safely unmap it, and mark it as dirty if
@@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
 
 	num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
 				 w->num_sge, 0);
-	kfree(w);
+	kvfree(w);
 }
 
 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 	if (valid_req)
 		queue_work(system_unbound_wq, &work->work);
 	else
-		kfree(work);
+		kvfree(work);
 
 	srcu_read_unlock(&dev->mr_srcu, srcu_key);
 
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
@@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		}
 
 		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
-		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
 		memcpy(rss_key, ucmd.rx_hash_key, len);
 		break;
 	}
@@ -4294,10 +4293,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 
 	memset(umr, 0, sizeof(*umr));
 
-	if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
-		umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
-	else
-		umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+	if (!umrwr->ignore_free_state) {
+		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+			/* fail if free */
+			umr->flags = MLX5_UMR_CHECK_FREE;
+		else
+			/* fail if not free */
+			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+	}
 
 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {