Merge branch 'odp_fixes' into hmm.git
From rdma.git, Jason Gunthorpe says:

====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================

The branch is based on v5.3-rc5 due to dependencies, and is being taken
into hmm.git due to dependencies in the next patches.

* odp_fixes:
  RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
  RDMA/mlx5: Use ib_umem_start instead of umem.address
  RDMA/core: Make invalidate_range a device operation
  RDMA/odp: Use kvcalloc for the dma_list and page_list
  RDMA/odp: Check for overflow when computing the umem_odp end
  RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
  RDMA/odp: Split creating a umem_odp from ib_umem_get
  RDMA/odp: Make the three ways to create a umem_odp clear
  RDMA/odp: Consolidate umem_odp initialization
  RDMA/odp: Make it clearer when a umem is an implicit ODP umem
  RDMA/odp: Iterate over the whole rbtree directly
  RDMA/odp: Use the common interval tree library instead of generic
  RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
21
mm/migrate.c
21
mm/migrate.c
@@ -767,12 +767,12 @@ recheck_buffers:
|
||||
}
|
||||
bh = bh->b_this_page;
|
||||
} while (bh != head);
|
||||
spin_unlock(&mapping->private_lock);
|
||||
if (busy) {
|
||||
if (invalidated) {
|
||||
rc = -EAGAIN;
|
||||
goto unlock_buffers;
|
||||
}
|
||||
spin_unlock(&mapping->private_lock);
|
||||
invalidate_bh_lrus();
|
||||
invalidated = true;
|
||||
goto recheck_buffers;
|
||||
@@ -805,6 +805,8 @@ recheck_buffers:
|
||||
|
||||
rc = MIGRATEPAGE_SUCCESS;
|
||||
unlock_buffers:
|
||||
if (check_refs)
|
||||
spin_unlock(&mapping->private_lock);
|
||||
bh = head;
|
||||
do {
|
||||
unlock_buffer(bh);
|
||||
@@ -2328,16 +2330,13 @@ next:
|
||||
static void migrate_vma_collect(struct migrate_vma *migrate)
|
||||
{
|
||||
struct mmu_notifier_range range;
|
||||
struct mm_walk mm_walk;
|
||||
|
||||
mm_walk.pmd_entry = migrate_vma_collect_pmd;
|
||||
mm_walk.pte_entry = NULL;
|
||||
mm_walk.pte_hole = migrate_vma_collect_hole;
|
||||
mm_walk.hugetlb_entry = NULL;
|
||||
mm_walk.test_walk = NULL;
|
||||
mm_walk.vma = migrate->vma;
|
||||
mm_walk.mm = migrate->vma->vm_mm;
|
||||
mm_walk.private = migrate;
|
||||
struct mm_walk mm_walk = {
|
||||
.pmd_entry = migrate_vma_collect_pmd,
|
||||
.pte_hole = migrate_vma_collect_hole,
|
||||
.vma = migrate->vma,
|
||||
.mm = migrate->vma->vm_mm,
|
||||
.private = migrate,
|
||||
};
|
||||
|
||||
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
|
||||
migrate->start,
|
||||
|
Reference in New Issue
Block a user