Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - large KASAN update to use arm's "software tag-based mode"

 - a few misc things

 - sh updates

 - ocfs2 updates

 - just about all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (167 commits)
  kernel/fork.c: mark 'stack_vm_area' with __maybe_unused
  memcg, oom: notify on oom killer invocation from the charge path
  mm, swap: fix swapoff with KSM pages
  include/linux/gfp.h: fix typo
  mm/hmm: fix memremap.h, move dev_page_fault_t callback to hmm
  hugetlbfs: Use i_mmap_rwsem to fix page fault/truncate race
  hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
  memory_hotplug: add missing newlines to debugging output
  mm: remove __hugepage_set_anon_rmap()
  include/linux/vmstat.h: remove unused page state adjustment macro
  mm/page_alloc.c: allow error injection
  mm: migrate: drop unused argument of migrate_page_move_mapping()
  blkdev: avoid migration stalls for blkdev pages
  mm: migrate: provide buffer_migrate_page_norefs()
  mm: migrate: move migrate_page_lock_buffers()
  mm: migrate: lock buffers before migrate_page_move_mapping()
  mm: migration: factor out code to compute expected number of page references
  mm, page_alloc: enable pcpu_drain with zone capability
  kmemleak: add config to select auto scan
  mm/page_alloc.c: don't call kasan_free_pages() at deferred mem init
  ...
@@ -146,15 +146,12 @@ static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
 }
 
 static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
-                                    struct mm_struct *mm,
-                                    unsigned long start,
-                                    unsigned long end,
-                                    bool blockable)
+                                const struct mmu_notifier_range *range)
 {
         struct ib_ucontext_per_mm *per_mm =
                 container_of(mn, struct ib_ucontext_per_mm, mn);
 
-        if (blockable)
+        if (range->blockable)
                 down_read(&per_mm->umem_rwsem);
         else if (!down_read_trylock(&per_mm->umem_rwsem))
                 return -EAGAIN;
@@ -169,9 +166,10 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
                 return 0;
         }
 
-        return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
+        return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
+                                             range->end,
                                              invalidate_range_start_trampoline,
-                                             blockable, NULL);
+                                             range->blockable, NULL);
 }
 
 static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
@@ -182,9 +180,7 @@ static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
 }
 
 static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
-                                  struct mm_struct *mm,
-                                  unsigned long start,
-                                  unsigned long end)
+                                const struct mmu_notifier_range *range)
 {
         struct ib_ucontext_per_mm *per_mm =
                 container_of(mn, struct ib_ucontext_per_mm, mn);
@@ -192,8 +188,8 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
         if (unlikely(!per_mm->active))
                 return;
 
-        rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start,
-                                      end,
+        rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
+                                      range->end,
                                       invalidate_range_end_trampoline, true, NULL);
         up_read(&per_mm->umem_rwsem);
 }
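For context, the hunks above convert the ODP mmu_notifier callbacks from the old separate mm/start/end/blockable argument list to the consolidated const struct mmu_notifier_range * introduced in this merge window. Below is a minimal sketch of a driver-side notifier written against the new signature, assuming the 5.0-era layout of struct mmu_notifier_range (mm, start, end plus a blockable flag); my_mirror, its rwsem, and the invalidation stubs are hypothetical names for illustration, not part of this commit.

#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <linux/rwsem.h>

/* Hypothetical per-mm mirror state, analogous to ib_ucontext_per_mm. */
struct my_mirror {
        struct mmu_notifier mn;
        struct rw_semaphore lock;
};

static int my_invalidate_range_start(struct mmu_notifier *mn,
                                     const struct mmu_notifier_range *range)
{
        struct my_mirror *mirror = container_of(mn, struct my_mirror, mn);

        /*
         * range->blockable replaces the old 'bool blockable' argument:
         * only sleep on the lock when the caller allows blocking,
         * otherwise back off with -EAGAIN as umem_odp.c does.
         */
        if (range->blockable)
                down_read(&mirror->lock);
        else if (!down_read_trylock(&mirror->lock))
                return -EAGAIN;

        /* ... invalidate mirrored mappings in [range->start, range->end) ... */
        return 0;
}

static void my_invalidate_range_end(struct mmu_notifier *mn,
                                    const struct mmu_notifier_range *range)
{
        struct my_mirror *mirror = container_of(mn, struct my_mirror, mn);

        /* ... resume faulting for [range->start, range->end) ... */
        up_read(&mirror->lock);
}

static const struct mmu_notifier_ops my_mn_ops = {
        .invalidate_range_start = my_invalidate_range_start,
        .invalidate_range_end   = my_invalidate_range_end,
};

static int my_mirror_attach(struct my_mirror *mirror, struct mm_struct *mm)
{
        init_rwsem(&mirror->lock);
        mirror->mn.ops = &my_mn_ops;
        return mmu_notifier_register(&mirror->mn, mm);
}

The notifier would be attached with mmu_notifier_register(), as umem_odp.c does for its per-mm context; returning -EAGAIN from a non-blockable invalidate_range_start lets callers such as the OOM reaper skip the range instead of sleeping.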