IB/mlx5: Implement on demand paging by adding support for MMU notifiers
* Implement the relevant invalidation functions (zap MTTs as needed)
* Implement interlocking (and rollback in the page fault handlers) for
  cases of a racing notifier and fault.
* With this patch we can now enable the capability bits for supporting RC
  send/receive/RDMA read/RDMA write, and UD send.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Shachar Raindel <raindel@mellanox.com>
Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
commit b4cfe447d4
parent eab668a6d0
committed by Roland Dreier
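For context on the second bullet above: the interlock between a racing notifier invalidation and a page fault comes down to serializing both paths on a per-MR lock, with the new "live" field letting the fault handler detect teardown and roll back. The following user-space C program is a minimal model of that discipline only; struct model_mr, fault_handler() and invalidate_range() are illustrative stand-ins, not the driver's actual code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 8

struct model_mr {
	pthread_mutex_t lock;   /* stands in for the per-MR/umem mutex */
	int live;               /* cleared when the MR is torn down */
	bool mapped[NPAGES];    /* stands in for hardware MTT entries */
};

/* Notifier side: zap the entries covering pages [start, end). */
static void invalidate_range(struct model_mr *mr, int start, int end)
{
	pthread_mutex_lock(&mr->lock);
	for (int i = start; i < end && i < NPAGES; i++)
		mr->mapped[i] = false;
	pthread_mutex_unlock(&mr->lock);
}

/* Fault side: map a page unless the MR died while we were faulting. */
static int fault_handler(struct model_mr *mr, int page)
{
	pthread_mutex_lock(&mr->lock);
	if (!mr->live) {
		/* Lost the race with teardown: roll back and fail. */
		pthread_mutex_unlock(&mr->lock);
		return -1;
	}
	mr->mapped[page] = true;
	pthread_mutex_unlock(&mr->lock);
	return 0;
}

int main(void)
{
	struct model_mr mr = { .live = 1 };

	pthread_mutex_init(&mr.lock, NULL);
	fault_handler(&mr, 3);            /* page 3 becomes mapped */
	invalidate_range(&mr, 0, NPAGES); /* notifier zaps everything */
	printf("page 3 mapped after invalidation: %d\n", mr.mapped[3]);
	pthread_mutex_destroy(&mr.lock);
	return 0;
}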
@@ -325,6 +325,7 @@ struct mlx5_ib_mr {
 	struct mlx5_ib_dev     *dev;
 	struct mlx5_create_mkey_mbox_out out;
 	struct mlx5_core_sig_ctx    *sig;
+	int			live;
 };
 
 struct mlx5_ib_fast_reg_page_list {
@@ -629,6 +630,8 @@ int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
 void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
 void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
+void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+			      unsigned long end);
 
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
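The newly declared mlx5_ib_invalidate_range() receives the invalidated range as byte addresses; conceptually, "zap MTTs as needed" means rounding that range outward to page granularity and clearing the covered translation entries. A hedged sketch of that arithmetic follows; PAGE_SIZE_MODEL, zap_entry() and invalidate_range_model() are hypothetical names, not kernel symbols.

#include <stdio.h>

#define PAGE_SIZE_MODEL 4096UL

static void zap_entry(unsigned long idx)
{
	printf("zap MTT entry %lu\n", idx);
}

static void invalidate_range_model(unsigned long umem_base,
				   unsigned long start, unsigned long end)
{
	/* Round outward so partially covered pages are zapped too. */
	unsigned long first = (start - umem_base) / PAGE_SIZE_MODEL;
	unsigned long last = (end - umem_base + PAGE_SIZE_MODEL - 1) /
			     PAGE_SIZE_MODEL;

	for (unsigned long idx = first; idx < last; idx++)
		zap_entry(idx);
}

int main(void)
{
	/* A 2.5-page range touching three pages: entries 1, 2 and 3. */
	invalidate_range_model(0x10000, 0x11000, 0x13800);
	return 0;
}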