mlx4: Implement memory windows allocation and deallocation
Implement MW allocation and deallocation in mlx4_core and mlx4_ib.
Pass down the enable bind flag when registering memory regions.

Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Shani Michaeli <shanim@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>

committed by Roland Dreier
parent e448834e35
commit 804d6a89a5
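For context, the two new verbs added below slot into the standard ib_alloc_mw()/ib_dealloc_mw() entry points of the IB core. The following is a hypothetical consumer sketch, not part of this patch: it assumes the driver has wired mlx4_ib_alloc_mw()/mlx4_ib_dealloc_mw() into its ib_device (done separately from this commit) and that a valid protection domain pd already exists.

	/* Hypothetical ULP usage sketch -- not part of this patch. */
	struct ib_mw *mw;

	mw = ib_alloc_mw(pd, IB_MW_TYPE_2);	/* dispatches to mlx4_ib_alloc_mw() */
	if (IS_ERR(mw))
		return PTR_ERR(mw);

	pr_info("allocated MW, rkey 0x%x\n", mw->rkey);

	ib_dealloc_mw(mw);			/* dispatches to mlx4_ib_dealloc_mw() */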
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -116,6 +116,11 @@ struct mlx4_ib_mr {
 	struct ib_umem	       *umem;
 };
 
+struct mlx4_ib_mw {
+	struct ib_mw		ibmw;
+	struct mlx4_mw		mmw;
+};
+
 struct mlx4_ib_fast_reg_page_list {
 	struct ib_fast_reg_page_list	ibfrpl;
 	__be64		       *mapped_page_list;
@@ -533,6 +538,11 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
 	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
 }
 
+static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
+}
+
 static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
 {
 	return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl);
@@ -581,6 +591,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+int mlx4_ib_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 					int max_page_list_len);
 struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -41,9 +41,19 @@ static u32 convert_access(int acc)
 	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) |
 	       (acc & IB_ACCESS_REMOTE_READ  ? MLX4_PERM_REMOTE_READ  : 0) |
 	       (acc & IB_ACCESS_LOCAL_WRITE  ? MLX4_PERM_LOCAL_WRITE  : 0) |
+	       (acc & IB_ACCESS_MW_BIND      ? MLX4_PERM_BIND_MW      : 0) |
 	       MLX4_PERM_LOCAL_READ;
 }
 
+static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
+{
+	switch (type) {
+	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
+	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
+	default:		return -1;
+	}
+}
+
 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	struct mlx4_ib_mr *mr;
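To make the convert_access() change above concrete: after this patch, an MR registered with remote-write and bind access translates as follows (local read is always granted).

	/* Illustration only: flag translation performed by convert_access(). */
	u32 perms = convert_access(IB_ACCESS_REMOTE_WRITE | IB_ACCESS_MW_BIND);
	/* perms == MLX4_PERM_REMOTE_WRITE | MLX4_PERM_BIND_MW | MLX4_PERM_LOCAL_READ */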
@@ -189,6 +199,48 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
 	return 0;
 }
 
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+{
+	struct mlx4_ib_dev *dev = to_mdev(pd->device);
+	struct mlx4_ib_mw *mw;
+	int err;
+
+	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
+	if (!mw)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
+			    to_mlx4_type(type), &mw->mmw);
+	if (err)
+		goto err_free;
+
+	err = mlx4_mw_enable(dev->dev, &mw->mmw);
+	if (err)
+		goto err_mw;
+
+	mw->ibmw.rkey = mw->mmw.key;
+
+	return &mw->ibmw;
+
+err_mw:
+	mlx4_mw_free(dev->dev, &mw->mmw);
+
+err_free:
+	kfree(mw);
+
+	return ERR_PTR(err);
+}
+
+int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
+{
+	struct mlx4_ib_mw *mw = to_mmw(ibmw);
+
+	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
+	kfree(mw);
+
+	return 0;
+}
+
 struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 					int max_page_list_len)
 {
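The mlx4_core half mentioned in the commit message is not shown in this excerpt. As a rough sketch of the division of labor: mlx4_mw_alloc() reserves an MPT entry and fills in the software mlx4_mw state, and mlx4_mw_enable() then issues the firmware command that activates the window. The outline below is an approximation inferred from the commit description and the existing MR code in drivers/net/ethernet/mellanox/mlx4/mr.c; names such as mlx4_mpt_reserve() and hw_index_to_key() follow that file, but the literal patch may differ. mlx4_mw_free() would undo exactly this: release the MPT entry and clear the enabled state.

/* Sketch only -- approximate shape of the mlx4_core allocation path. */
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
		  struct mlx4_mw *mw)
{
	u32 index = mlx4_mpt_reserve(dev);	/* grab a free MPT slot */

	if (index == -1)
		return -ENOMEM;

	mw->key	    = hw_index_to_key(index);	/* becomes the consumer-visible rkey */
	mw->pd	    = pd;
	mw->type    = type;
	mw->enabled = MLX4_MPT_DISABLED;	/* stays off until mlx4_mw_enable() */

	return 0;
}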