  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _ASM_X86_TLB_H
  3. #define _ASM_X86_TLB_H
  4. #define tlb_flush tlb_flush
  5. static inline void tlb_flush(struct mmu_gather *tlb);
  6. #include <asm-generic/tlb.h>
  7. static inline void tlb_flush(struct mmu_gather *tlb)
  8. {
  9. unsigned long start = 0UL, end = TLB_FLUSH_ALL;
  10. unsigned int stride_shift = tlb_get_unmap_shift(tlb);
  11. if (!tlb->fullmm && !tlb->need_flush_all) {
  12. start = tlb->start;
  13. end = tlb->end;
  14. }
  15. flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
  16. }
/*
 * While the x86 architecture in general requires an IPI to perform TLB
 * shootdown, enablement code for several hypervisors overrides the
 * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
 * a hypercall.  To keep software pagetable walkers safe in this case we
 * switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE).  See the
 * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in
 * include/asm-generic/tlb.h for more details.
 */
  26. static inline void __tlb_remove_table(void *table)
  27. {
  28. free_page_and_swap_cache(table);
  29. }
  30. #endif /* _ASM_X86_TLB_H */