mm/mmu_notifier: avoid call to invalidate_range() in range_end()
This is an optimization patch that only affects mmu_notifier users which
rely on the invalidate_range() callback.  This patch avoids calling that
callback twice in a row from inside __mmu_notifier_invalidate_range_end().

Existing pattern (before this patch):
    mmu_notifier_invalidate_range_start()
        pte/pmd/pud_clear_flush_notify()
            mmu_notifier_invalidate_range()
    mmu_notifier_invalidate_range_end()
        mmu_notifier_invalidate_range()

New pattern (after this patch):
    mmu_notifier_invalidate_range_start()
        pte/pmd/pud_clear_flush_notify()
            mmu_notifier_invalidate_range()
    mmu_notifier_invalidate_range_only_end()

We call the invalidate_range() callback after clearing the page table,
under the page table lock, and we skip the call to invalidate_range()
inside the __mmu_notifier_invalidate_range_end() function.

Idea from Andrea Arcangeli

Link: http://lkml.kernel.org/r/20171017031003.7481-3-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Alistair Popple <alistair@popple.id.au>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
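For context (not part of the commit itself), below is a minimal sketch of a
caller using the new pattern.  The function name example_clear_one_pte, the
single-pte scope, and the omitted page-table-lock handling are illustrative
assumptions; real callers hold the ptl around the clear-and-notify step.

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/* Illustrative caller of the new pattern (locking elided for brevity). */
static void example_clear_one_pte(struct vm_area_struct *vma,
				  unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = addr & PAGE_MASK;
	unsigned long end = start + PAGE_SIZE;

	mmu_notifier_invalidate_range_start(mm, start, end);

	/*
	 * ptep_clear_flush_notify() already calls
	 * mmu_notifier_invalidate_range() for this range, under the
	 * page table lock, right after flushing the TLB.
	 */
	ptep_clear_flush_notify(vma, addr, ptep);

	/*
	 * invalidate_range() has run above, so close the section with
	 * the _only_end() variant and skip the duplicate callback.
	 */
	mmu_notifier_invalidate_range_only_end(mm, start, end);
}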
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -190,7 +190,9 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
 EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);
 
 void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
-					 unsigned long start, unsigned long end)
+					 unsigned long start,
+					 unsigned long end,
+					 bool only_end)
 {
 	struct mmu_notifier *mn;
 	int id;
@@ -204,8 +206,13 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 		 * subsystem registers either invalidate_range_start()/end() or
 		 * invalidate_range(), so this will be no additional overhead
 		 * (besides the pointer check).
+		 *
+		 * We skip call to invalidate_range() if we know it is safe ie
+		 * call site use mmu_notifier_invalidate_range_only_end() which
+		 * is safe to do when we know that a call to invalidate_range()
+		 * already happen under page table lock.
 		 */
-		if (mn->ops->invalidate_range)
+		if (!only_end && mn->ops->invalidate_range)
 			mn->ops->invalidate_range(mn, mm, start, end);
 		if (mn->ops->invalidate_range_end)
 			mn->ops->invalidate_range_end(mn, mm, start, end);
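The diff above only shows the core function gaining the only_end flag; the
public entry points are inline wrappers.  Given that the in-code comment
names mmu_notifier_invalidate_range_only_end(), the matching header side in
include/linux/mmu_notifier.h plausibly looks like the sketch below; the
mm_has_notifiers() guard mirrors the existing wrapper convention.

/* Sketch of the header-side wrappers implied by the new only_end flag. */
static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, false);
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, true);
}

With this split, call sites that have already issued invalidate_range()
under the page table lock (via the *_clear_flush_notify() helpers) pick the
_only_end() variant, while all other callers keep the old behaviour
unchanged.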