mm/mmu_notifier: use structure for invalidate_range_start/end calls v2
To avoid having to change many call sites every time we want to add a parameter, use a structure to group all parameters for the mmu_notifier invalidate_range_start/end calls. No functional changes with this patch. [akpm@linux-foundation.org: coding style fixes] Link: http://lkml.kernel.org/r/20181205053628.3210-3-jglisse@redhat.com Signed-off-by: Jérôme Glisse <jglisse@redhat.com> Acked-by: Christian König <christian.koenig@amd.com> Acked-by: Jan Kara <jack@suse.cz> Cc: Matthew Wilcox <mawilcox@microsoft.com> Cc: Ross Zwisler <zwisler@kernel.org> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Radim Krcmar <rkrcmar@redhat.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Felix Kuehling <felix.kuehling@amd.com> Cc: Ralph Campbell <rcampbell@nvidia.com> Cc: John Hubbard <jhubbard@nvidia.com> From: Jérôme Glisse <jglisse@redhat.com> Subject: mm/mmu_notifier: use structure for invalidate_range_start/end calls v3 fix build warning in migrate.c when CONFIG_MMU_NOTIFIER=n Link: http://lkml.kernel.org/r/20181213171330.8489-3-jglisse@redhat.com Signed-off-by: Jérôme Glisse <jglisse@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

committed by
Linus Torvalds

parent
5d6527a784
commit
ac46d4f3c4
@@ -1451,6 +1451,8 @@ struct mm_walk {
|
||||
void *private;
|
||||
};
|
||||
|
||||
struct mmu_notifier_range;
|
||||
|
||||
int walk_page_range(unsigned long addr, unsigned long end,
|
||||
struct mm_walk *walk);
|
||||
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
|
||||
@@ -1459,8 +1461,8 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
|
||||
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
|
||||
struct vm_area_struct *vma);
|
||||
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
|
||||
unsigned long *start, unsigned long *end,
|
||||
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
|
||||
struct mmu_notifier_range *range,
|
||||
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
|
||||
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
|
||||
unsigned long *pfn);
|
||||
int follow_phys(struct vm_area_struct *vma, unsigned long address,
|
||||
|
@@ -220,11 +220,8 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
|
||||
unsigned long address);
|
||||
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
|
||||
unsigned long address, pte_t pte);
|
||||
extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end,
|
||||
bool blockable);
|
||||
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end,
|
||||
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
|
||||
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
|
||||
bool only_end);
|
||||
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end);
|
||||
@@ -268,33 +265,37 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
|
||||
__mmu_notifier_change_pte(mm, address, pte);
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
static inline void
|
||||
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
|
||||
{
|
||||
if (mm_has_notifiers(mm))
|
||||
__mmu_notifier_invalidate_range_start(mm, start, end, true);
|
||||
if (mm_has_notifiers(range->mm)) {
|
||||
range->blockable = true;
|
||||
__mmu_notifier_invalidate_range_start(range);
|
||||
}
|
||||
}
|
||||
|
||||
static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
static inline int
|
||||
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
|
||||
{
|
||||
if (mm_has_notifiers(mm))
|
||||
return __mmu_notifier_invalidate_range_start(mm, start, end, false);
|
||||
if (mm_has_notifiers(range->mm)) {
|
||||
range->blockable = false;
|
||||
return __mmu_notifier_invalidate_range_start(range);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
static inline void
|
||||
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
|
||||
{
|
||||
if (mm_has_notifiers(mm))
|
||||
__mmu_notifier_invalidate_range_end(mm, start, end, false);
|
||||
if (mm_has_notifiers(range->mm))
|
||||
__mmu_notifier_invalidate_range_end(range, false);
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
static inline void
|
||||
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
|
||||
{
|
||||
if (mm_has_notifiers(mm))
|
||||
__mmu_notifier_invalidate_range_end(mm, start, end, true);
|
||||
if (mm_has_notifiers(range->mm))
|
||||
__mmu_notifier_invalidate_range_end(range, true);
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
|
||||
@@ -315,6 +316,17 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
|
||||
__mmu_notifier_mm_destroy(mm);
|
||||
}
|
||||
|
||||
|
||||
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
|
||||
struct mm_struct *mm,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
range->mm = mm;
|
||||
range->start = start;
|
||||
range->end = end;
|
||||
}
|
||||
|
||||
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
|
||||
({ \
|
||||
int __young; \
|
||||
@@ -427,6 +439,23 @@ extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
|
||||
|
||||
#else /* CONFIG_MMU_NOTIFIER */
|
||||
|
||||
struct mmu_notifier_range {
|
||||
unsigned long start;
|
||||
unsigned long end;
|
||||
};
|
||||
|
||||
static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
range->start = start;
|
||||
range->end = end;
|
||||
}
|
||||
|
||||
#define mmu_notifier_range_init(range, mm, start, end) \
|
||||
_mmu_notifier_range_init(range, start, end)
|
||||
|
||||
|
||||
static inline int mm_has_notifiers(struct mm_struct *mm)
|
||||
{
|
||||
return 0;
|
||||
@@ -454,24 +483,24 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
|
||||
{
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
static inline void
|
||||
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
static inline int
|
||||
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
static inline
|
||||
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
static inline void
|
||||
mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
|
||||
{
|
||||
}
|
||||
|
||||
|
Reference in New Issue
Block a user