Merge tag 'for-linus-unmerged' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "Doug and I are at a conference next week so if another PR is sent I
  expect it to only be bug fixes. Parav noted yesterday that there are
  some fringe case behavior changes in his work that he would like to
  fix, and I see that Intel has a number of rc-looking patches for HFI1
  that they posted yesterday.

  Parav is again the biggest contributor by patch count with his
  ongoing work to enable container support in the RDMA stack, followed
  by Leon doing syzkaller-inspired cleanups, though most of the actual
  fixing went to -rc.

  There is one uncomfortable series here fixing the user ABI to
  actually work as intended in 32 bit mode. There are lots of notes in
  the commit messages, but the basic summary is that we don't think
  there is an actual 32 bit kernel user of drivers/infiniband, for
  several good reasons. However, we are seeing people who want to use a
  32 bit user space with a 64 bit kernel, which until now didn't
  completely work. Fixing it required 32 bit rxe users to upgrade their
  userspace. rxe users are still quite rare and we think a 32 bit one
  is non-existent.

   - Fix RDMA uapi headers to actually compile in userspace and be more
     complete

   - Three shared-with-netdev pull requests from Mellanox:

       * 7 patches, mostly to net, with one IB-related patch at the
         back. This series addresses an IRQ performance issue (patch
         1), cleanups related to the fix for the IRQ performance
         problem (patches 2-6), and then extends the fragmented
         completion queue support that already exists in the net side
         of the driver to the ib side of the driver (patch 7).

       * Mostly IB, with 5 patches to net that are needed to support
         the remaining 10 patches to the IB subsystem. This series
         extends the current 'representor' framework when the mlx5
         driver is in switchdev mode from being a netdev-only construct
         to being a netdev/IB dev construct. The IB dev is limited to
         raw Eth queue pairs only, but by having an IB dev of this type
         attached to the representor for a switchdev port, it enables
         DPDK to work on the switchdev device.

       * All net related, but needed as infrastructure for the rdma
         driver

   - Updates for the hns, i40iw, bnxt_re, cxgb3 and cxgb4 drivers

   - SRP performance updates

   - IB uverbs write path cleanup patch series from Leon

   - Add RDMA_CM support to ib_srpt. This is disabled by default. Users
     need to set the port for ib_srpt to listen on in configfs in order
     for it to be enabled
     (/sys/kernel/config/target/srpt/discovery_auth/rdma_cm_port)

   - TSO and Scatter FCS support in mlx4

   - Refactor of the modify_qp routine to resolve problems seen while
     working on new code that is forthcoming

   - More refactoring and updates of RDMA CM for container support from
     Parav

   - mlx5 'fine grained packet pacing', 'ipsec offload' and 'device
     memory' user API features

   - Infrastructure updates for the new IOCTL interface, based on
     increased usage

   - ABI compatibility bug fixes to fully support 32 bit userspace on a
     64 bit kernel as was originally intended. See the commit messages
     for extensive details

   - Syzkaller bugs and code cleanups motivated by them"

* tag 'for-linus-unmerged' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (199 commits)
  IB/rxe: Fix for oops in rxe_register_device on ppc64le arch
  IB/mlx5: Device memory mr registration support
  net/mlx5: Mkey creation command adjustments
  IB/mlx5: Device memory support in mlx5_ib
  net/mlx5: Query device memory capabilities
  IB/uverbs: Add device memory registration ioctl support
  IB/uverbs: Add alloc/free dm uverbs ioctl support
  IB/uverbs: Add device memory capabilities reporting
  IB/uverbs: Expose device memory capabilities to user
  RDMA/qedr: Fix wmb usage in qedr
  IB/rxe: Removed GID add/del dummy routines
  RDMA/qedr: Zero stack memory before copying to user space
  IB/mlx5: Add ability to hash by IPSEC_SPI when creating a TIR
  IB/mlx5: Add information for querying IPsec capabilities
  IB/mlx5: Add IPsec support for egress and ingress
  {net,IB}/mlx5: Add ipsec helper
  IB/mlx5: Add modify_flow_action_esp verb
  IB/mlx5: Add implementation for create and destroy action_xfrm
  IB/uverbs: Introduce ESP steering match filter
  IB/uverbs: Add modify ESP flow_action
  ...
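The 32 bit ABI series flagged above is, at bottom, a structure-layout problem: a uapi struct that carries a bare pointer or a long has a different size and alignment in a 32 bit process than in a 64 bit kernel. Below is a minimal user-space sketch of the failure mode and the usual cure (fixed-width fields with explicit 8-byte alignment, the role __aligned_u64 plays in the kernel headers); the struct names are illustrative, not actual RDMA uapi definitions:

#include <stdint.h>
#include <stdio.h>

/* Broken pattern: layout differs between 32 bit and 64 bit builds. */
struct cmd_bad {
	uint32_t len;
	void *buf;		/* 4 bytes on 32 bit, 8 bytes on 64 bit */
	unsigned long id;	/* same problem */
};

/* Portable pattern: every field has one size and alignment everywhere. */
struct cmd_good {
	uint32_t len;
	uint32_t reserved;			/* explicit padding, kept zero */
	uint64_t buf;				/* pointer carried as a u64 */
	uint64_t id __attribute__((aligned(8)));
};

int main(void)
{
	/* On i386 sizeof(struct cmd_bad) is 12, on x86-64 it is 24, so a
	 * 32 bit process and a 64 bit kernel disagree about the command
	 * layout; struct cmd_good is 24 bytes on both. */
	printf("bad=%zu good=%zu\n", sizeof(struct cmd_bad),
	       sizeof(struct cmd_good));
	return 0;
}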
@@ -47,10 +47,25 @@ enum {
 
 #define MLX5_UMR_ALIGN 2048
 
-static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 
+static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
+{
+	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
+}
+
+static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
+{
+	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
+}
+
+static bool use_umr(struct mlx5_ib_dev *dev, int order)
+{
+	return order <= mr_cache_max_order(dev) &&
+		umr_can_modify_entity_size(dev);
+}
+
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
@@ -189,7 +204,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 
 		MLX5_SET(mkc, mkc, free, 1);
 		MLX5_SET(mkc, mkc, umr_en, 1);
-		MLX5_SET(mkc, mkc, access_mode, ent->access_mode);
+		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
+		MLX5_SET(mkc, mkc, access_mode_4_2,
+			 (ent->access_mode >> 2) & 0x7);
 
 		MLX5_SET(mkc, mkc, qpn, 0xffffff);
 		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
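/*
 * A minimal stand-alone sketch (plain C, not kernel code): the hunk above
 * splits the old access_mode field into access_mode_1_0 (bits 1..0) and
 * access_mode_4_2 (bits 4..2), which is exactly the masking below. The
 * mode value 0x5 is an assumption for illustration; modes that need bit 2
 * and above are why a single two-bit field no longer sufficed.
 */
#include <assert.h>
#include <stdint.h>

static void split_access_mode(uint8_t mode, uint8_t *lo_1_0, uint8_t *hi_4_2)
{
	*lo_1_0 = mode & 0x3;		/* low two bits */
	*hi_4_2 = (mode >> 2) & 0x7;	/* next three bits */
}

int main(void)
{
	uint8_t lo, hi;

	split_access_mode(0x5, &lo, &hi);	/* 0x5 = 0b101 */
	assert(lo == 0x1 && hi == 0x1);
	return 0;
}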
@@ -220,26 +237,32 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
+	struct mlx5_ib_mr *tmp_mr;
 	struct mlx5_ib_mr *mr;
-	int err;
+	LIST_HEAD(del_list);
 	int i;
 
 	for (i = 0; i < num; i++) {
 		spin_lock_irq(&ent->lock);
 		if (list_empty(&ent->head)) {
 			spin_unlock_irq(&ent->lock);
-			return;
+			break;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-		list_del(&mr->list);
+		list_move(&mr->list, &del_list);
 		ent->cur--;
 		ent->size--;
 		spin_unlock_irq(&ent->lock);
-		err = destroy_mkey(dev, mr);
-		if (err)
-			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		else
-			kfree(mr);
+		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
 	}
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	synchronize_srcu(&dev->mr_srcu);
+#endif
+
+	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
+		list_del(&mr->list);
+		kfree(mr);
+	}
 }
@@ -562,26 +585,32 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
+	struct mlx5_ib_mr *tmp_mr;
 	struct mlx5_ib_mr *mr;
-	int err;
+	LIST_HEAD(del_list);
 
 	cancel_delayed_work(&ent->dwork);
 	while (1) {
 		spin_lock_irq(&ent->lock);
 		if (list_empty(&ent->head)) {
 			spin_unlock_irq(&ent->lock);
-			return;
+			break;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-		list_del(&mr->list);
+		list_move(&mr->list, &del_list);
 		ent->cur--;
 		ent->size--;
 		spin_unlock_irq(&ent->lock);
-		err = destroy_mkey(dev, mr);
-		if (err)
-			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		else
-			kfree(mr);
+		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
 	}
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	synchronize_srcu(&dev->mr_srcu);
+#endif
+
+	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
+		list_del(&mr->list);
+		kfree(mr);
+	}
 }
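/*
 * A user-space model (plain C with pthreads, not kernel code) of the
 * teardown pattern both hunks above adopt: move entries to a private list
 * under the lock, destroy the hardware keys, wait out one grace period,
 * and only then free the memory. destroy_hw_key() and wait_for_readers()
 * stand in for mlx5_core_destroy_mkey() and synchronize_srcu().
 */
#include <stdlib.h>
#include <pthread.h>

struct mr {
	struct mr *next;
	int key;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct mr *cache_head;			/* models ent->head */

static void destroy_hw_key(int key) { (void)key; }
static void wait_for_readers(void) { }		/* grace-period stand-in */

static void clean_keys(void)
{
	struct mr *del_list = NULL, *mr, *tmp;

	/* move everything to a private del_list while holding the lock */
	pthread_mutex_lock(&lock);
	while ((mr = cache_head)) {
		cache_head = mr->next;
		mr->next = del_list;
		del_list = mr;
	}
	pthread_mutex_unlock(&lock);

	/* destroy the keys outside the lock */
	for (mr = del_list; mr; mr = mr->next)
		destroy_hw_key(mr->key);

	/* after the grace period no reader can still hold an mr pointer */
	wait_for_readers();
	for (mr = del_list; mr; mr = tmp) {
		tmp = mr->next;
		free(mr);
	}
}

int main(void)
{
	clean_keys();
	return 0;
}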
@@ -780,7 +809,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 
-	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
 	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
 	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
 	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
@@ -947,7 +976,10 @@ static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
 {
 	struct mlx5_ib_dev *dev = mr->dev;
 	struct ib_umem *umem = mr->umem;
+
 	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
+		if (!umr_can_use_indirect_mkey(dev))
+			return -EPERM;
 		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
 		return npages;
 	}
@@ -977,7 +1009,6 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 {
 	struct mlx5_ib_dev *dev = mr->dev;
 	struct device *ddev = dev->ib_dev.dev.parent;
-	struct mlx5_ib_ucontext *uctx = NULL;
 	int size;
 	void *xlt;
 	dma_addr_t dma;
@@ -993,6 +1024,11 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	size_t pages_to_map = 0;
 	size_t pages_iter = 0;
 	gfp_t gfp;
+	bool use_emergency_page = false;
+
+	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
+	    !umr_can_use_indirect_mkey(dev))
+		return -EPERM;
 
 	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
 	 * so we need to align the offset and length accordingly
@@ -1019,12 +1055,11 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	}
 
 	if (!xlt) {
-		uctx = to_mucontext(mr->ibmr.pd->uobject->context);
 		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
+		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
 		size = PAGE_SIZE;
-		xlt = (void *)uctx->upd_xlt_page;
-		mutex_lock(&uctx->upd_xlt_page_mutex);
 		memset(xlt, 0, size);
+		use_emergency_page = true;
 	}
 	pages_iter = size / desc_size;
 	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
@@ -1088,8 +1123,8 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
 
 free_xlt:
-	if (uctx)
-		mutex_unlock(&uctx->upd_xlt_page_mutex);
+	if (use_emergency_page)
+		mlx5_ib_put_xlt_emergency_page();
 	else
 		free_pages((unsigned long)xlt, get_order(size));
 
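/*
 * A stand-alone sketch (plain C, not driver code) of the fallback shape the
 * two hunks above switch to: try a normal allocation, fall back to one
 * global emergency buffer guarded by a lock, and make the release path
 * mirror the acquire path via a bool. get_emergency_buf() and
 * put_emergency_buf() stand in for the mlx5_ib_{get,put}_xlt_emergency_page()
 * helpers.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#define EMERGENCY_SIZE 4096

static char emergency_buf[EMERGENCY_SIZE];
static pthread_mutex_t emergency_lock = PTHREAD_MUTEX_INITIALIZER;

static void *get_emergency_buf(void)
{
	pthread_mutex_lock(&emergency_lock);	/* one user at a time */
	return emergency_buf;
}

static void put_emergency_buf(void)
{
	pthread_mutex_unlock(&emergency_lock);
}

static void do_transfer(size_t size)
{
	bool use_emergency = false;
	void *buf = malloc(size);

	if (!buf) {
		buf = get_emergency_buf();
		size = EMERGENCY_SIZE;
		memset(buf, 0, size);
		use_emergency = true;
	}

	/* ... fill and use buf for up to size bytes ... */

	if (use_emergency)		/* release mirrors acquire */
		put_emergency_buf();
	else
		free(buf);
}

int main(void)
{
	do_transfer(1 << 20);
	return 0;
}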
@@ -1141,7 +1176,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 
 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 	MLX5_SET(mkc, mkc, free, !populate);
-	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
 	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
 	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
 	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
@@ -1197,22 +1232,96 @@ static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 	mr->access_flags = access_flags;
 }
 
+static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
+					   u64 length, int acc)
+{
+	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	struct mlx5_core_dev *mdev = dev->mdev;
+	struct mlx5_ib_mr *mr;
+	void *mkc;
+	u32 *in;
+	int err;
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
+	in = kzalloc(inlen, GFP_KERNEL);
+	if (!in) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
+	MLX5_SET(mkc, mkc, access_mode_4_2,
+		 (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
+	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+	MLX5_SET(mkc, mkc, lr, 1);
+
+	MLX5_SET64(mkc, mkc, len, length);
+	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET64(mkc, mkc, start_addr,
+		   memic_addr - pci_resource_start(dev->mdev->pdev, 0));
+
+	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
+	if (err)
+		goto err_in;
+
+	kfree(in);
+
+	mr->umem = NULL;
+	set_mr_fileds(dev, mr, 0, length, acc);
+
+	return &mr->ibmr;
+
+err_in:
+	kfree(in);
+
+err_free:
+	kfree(mr);
+
+	return ERR_PTR(err);
+}
+
+struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
+				struct ib_dm_mr_attr *attr,
+				struct uverbs_attr_bundle *attrs)
+{
+	struct mlx5_ib_dm *mdm = to_mdm(dm);
+	u64 memic_addr;
+
+	if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
+		return ERR_PTR(-EINVAL);
+
+	memic_addr = mdm->dev_addr + attr->offset;
+
+	return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length,
+				    attr->access_flags);
+}
+
 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_mr *mr = NULL;
+	bool populate_mtts = false;
 	struct ib_umem *umem;
 	int page_shift;
 	int npages;
 	int ncont;
 	int order;
 	int err;
-	bool use_umr = true;
 
 	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-EOPNOTSUPP);
 
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
 		    start, virt_addr, length, access_flags);
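/*
 * A minimal stand-alone sketch (plain C, not driver code) of the address
 * arithmetic in the new mlx5_ib_get_memic_mr() above: the mkey start_addr
 * is the device-memory address made relative to BAR 0. All numbers below
 * are made up for illustration.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t bar0_start  = 0xf0000000;	/* stands in for pci_resource_start(pdev, 0) */
	uint64_t dm_dev_addr = 0xf0100000;	/* stands in for mdm->dev_addr */
	uint64_t attr_offset = 0x200;		/* stands in for attr->offset */

	uint64_t memic_addr = dm_dev_addr + attr_offset;
	uint64_t start_addr = memic_addr - bar0_start;

	assert(start_addr == 0x100200);	/* BAR-relative value programmed into the mkc */
	return 0;
}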
@@ -1224,6 +1333,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return ERR_PTR(-EINVAL);
 
 		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
+		if (IS_ERR(mr))
+			return ERR_CAST(mr);
 		return &mr->ibmr;
 	}
 #endif
@@ -1234,26 +1345,29 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	if (order <= mr_cache_max_order(dev)) {
+	if (use_umr(dev, order)) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
 					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
 			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
 			mr = NULL;
 		}
+		populate_mtts = false;
 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
 		if (access_flags & IB_ACCESS_ON_DEMAND) {
 			err = -EINVAL;
 			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
 			goto error;
 		}
-		use_umr = false;
+		populate_mtts = true;
 	}
 
 	if (!mr) {
+		if (!umr_can_modify_entity_size(dev))
+			populate_mtts = true;
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-				page_shift, access_flags, !use_umr);
+				page_shift, access_flags, populate_mtts);
 		mutex_unlock(&dev->slow_path_mutex);
 	}
 
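/*
 * A stand-alone model (plain C, not driver code) of the choice made in the
 * hunk above: populate_mtts decides whether reg_create() must write the
 * MTTs at creation time, which is needed whenever a later UMR WQE cannot
 * update them. The three booleans stand in for mr_cache_max_order() and
 * the MLX5_CAP_GEN() capability bits; this mirrors the diff's control flow
 * only, under those assumptions.
 */
#include <stdbool.h>

static bool use_umr(bool order_fits_cache, bool can_modify_entity_size)
{
	return order_fits_cache && can_modify_entity_size;
}

static bool populate_mtts_on_create(bool order_fits_cache,
				    bool can_modify_entity_size,
				    bool have_extended_xlt_offset)
{
	if (use_umr(order_fits_cache, can_modify_entity_size))
		return false;	/* cache path: a UMR WQE fills the MTTs later */
	if (!have_extended_xlt_offset)
		return true;	/* older hardware: slow path, populate now */
	return !can_modify_entity_size;
}

int main(void)
{
	/* hardware that cannot modify entity size must populate at create */
	return populate_mtts_on_create(true, false, true) ? 0 : 1;
}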
@@ -1271,7 +1385,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	update_odp_mr(mr);
 #endif
 
-	if (use_umr) {
+	if (!populate_mtts) {
 		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
 		if (access_flags & IB_ACCESS_ON_DEMAND)
@@ -1286,7 +1400,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		}
 	}
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	mr->live = 1;
+#endif
 	return &mr->ibmr;
 error:
 	ib_umem_release(umem);
@@ -1365,36 +1481,34 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		ib_umem_release(mr->umem);
 		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
 				  &npages, &page_shift, &ncont, &order);
-		if (err < 0) {
-			clean_mr(dev, mr);
-			return err;
-		}
+		if (err)
+			goto err;
 	}
 
 	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
 		/*
 		 * UMR can't be used - MKey needs to be replaced.
 		 */
-		if (mr->allocated_from_cache) {
+		if (mr->allocated_from_cache)
 			err = unreg_umr(dev, mr);
-			if (err)
-				mlx5_ib_warn(dev, "Failed to unregister MR\n");
-		} else {
+		else
 			err = destroy_mkey(dev, mr);
-			if (err)
-				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
-		}
 		if (err)
-			return err;
+			goto err;
 
 		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
 				page_shift, access_flags, true);
 
-		if (IS_ERR(mr))
-			return PTR_ERR(mr);
+		if (IS_ERR(mr)) {
+			err = PTR_ERR(mr);
+			mr = to_mmr(ib_mr);
+			goto err;
+		}
 
 		mr->allocated_from_cache = 0;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 		mr->live = 1;
-#endif
 	} else {
 		/*
 		 * Send a UMR WQE
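/*
 * The rereg hunk above folds four slightly different error returns into a
 * single 'err:' label that performs the whole unwind (release the umem,
 * clean_mr()) exactly once. A minimal stand-alone sketch of that shape,
 * with step() as a stand-in for the operations that can fail:
 */
#include <stdlib.h>

struct res {
	void *umem;
};

static int step(int n)
{
	return n == 2 ? -1 : 0;	/* pretend the second step fails */
}

static int rereg(struct res *r)
{
	int err;

	err = step(1);
	if (err)
		goto err;
	err = step(2);
	if (err)
		goto err;
	return 0;

err:
	free(r->umem);	/* the single unwind point runs for every failure */
	r->umem = NULL;
	return err;
}

int main(void)
{
	struct res r = { .umem = malloc(16) };

	return rereg(&r) ? 0 : 1;	/* expect step 2 to fail in this sketch */
}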
@@ -1417,13 +1531,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 			err = rereg_umr(pd, mr, access_flags, flags);
 		}
 
-		if (err) {
-			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
-			ib_umem_release(mr->umem);
-			mr->umem = NULL;
-			clean_mr(dev, mr);
-			return err;
-		}
+		if (err)
+			goto err;
 	}
 
 	set_mr_fileds(dev, mr, npages, len, access_flags);
@@ -1432,6 +1541,14 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 	update_odp_mr(mr);
 #endif
 	return 0;
+
+err:
+	if (mr->umem) {
+		ib_umem_release(mr->umem);
+		mr->umem = NULL;
+	}
+	clean_mr(dev, mr);
+	return err;
 }
 
 static int
@@ -1480,10 +1597,9 @@ mlx5_free_priv_descs(mr)
 	}
 }
 
-static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	int allocated_from_cache = mr->allocated_from_cache;
-	int err;
 
 	if (mr->sig) {
 		if (mlx5_core_destroy_psv(dev->mdev,
@@ -1500,21 +1616,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 	mlx5_free_priv_descs(mr);
 
-	if (!allocated_from_cache) {
-		u32 key = mr->mmkey.key;
-
-		err = destroy_mkey(dev, mr);
-		if (err) {
-			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
-				     key, err);
-			return err;
-		}
-	}
-
-	return 0;
+	if (!allocated_from_cache)
+		destroy_mkey(dev, mr);
 }
 
-static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	int npages = mr->npages;
 	struct ib_umem *umem = mr->umem;
@@ -1555,16 +1661,12 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		kfree(mr);
 	else
 		mlx5_mr_cache_free(dev, mr);
-
-	return 0;
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
 {
-	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
-	struct mlx5_ib_mr *mr = to_mmr(ibmr);
-
-	return dereg_mr(dev, mr);
+	dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
+	return 0;
 }
 
 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
@@ -1645,7 +1747,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 		goto err_free_in;
 	}
 
-	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
+	MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
+	MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
 	MLX5_SET(mkc, mkc, umr_en, 1);
 
 	mr->ibmr.device = pd->device;
@@ -1726,7 +1829,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
 	MLX5_SET(mkc, mkc, umr_en, 1);
 	MLX5_SET(mkc, mkc, lr, 1);
-	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KLMS);
+	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
 	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);