/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or
 * switching the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); mark the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have
 *    no additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma() because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
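 * As a rough sketch of the API above (simplified and loosely modeled on the
 * zap path in mm/memory.c; the loop and its variables are illustrative, not
 * the exact callers), the intended call sequence looks like:
 *
 *      struct mmu_gather tlb;
 *
 *      tlb_gather_mmu(&tlb, mm);
 *      tlb_start_vma(&tlb, vma);
 *      for (; addr < end; addr += PAGE_SIZE, pte++) {
 *              pte_t ptent = ptep_get_and_clear(mm, addr, pte);
 *
 *              tlb_remove_tlb_entry(&tlb, pte, addr);   -- track flush range
 *              tlb_remove_page(&tlb, pte_page(ptent));  -- queue page freeing
 *      }
 *      tlb_end_vma(&tlb, vma);   -- may flush at the VMA boundary
 *      tlb_finish_mmu(&tlb);     -- final invalidate, free queued pages
 *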
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_FLUSH_CACHE
 *
 *  Indicates the architecture has flush_cache_range() but it need *NOT* be
 *  called before unmapping a VMA.
 *
 *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
 *        flush_cache_range() being a NOP, except Sparc64 seems to be
 *        different here.
 *
 *  MMU_GATHER_MERGE_VMAS
 *
 *  Indicates the architecture wants to merge ranges over VMAs; typical when
 *  multiple range invalidates are more expensive than a full invalidate.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range(). This
 *  option implies MMU_GATHER_MERGE_VMAS above.
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If the option is set the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
        struct rcu_head         rcu;
#endif
        unsigned int            nr;
        void                    *tables[];
};

#define MAX_TABLE_BATCH         \
        ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
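
/*
 * A worked example of the batch capacity (illustrative values, assuming a
 * 64-bit architecture with 4 KiB pages; the real numbers are arch and
 * config dependent): with the rcu_head present, the header is a 16-byte
 * rcu_head plus a 4-byte nr, padded to 24 bytes for the pointer array, so
 * MAX_TABLE_BATCH = (4096 - 24) / 8 = 509 table pointers per batch page.
 */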
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the Linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE       8

struct mmu_gather_batch {
        struct mmu_gather_batch *next;
        unsigned int            nr;
        unsigned int            max;
        struct page             *pages[];
};

#define MAX_GATHER_BATCH        \
        ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT  (10000UL/MAX_GATHER_BATCH)
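
/*
 * Illustrative arithmetic (again assuming a 64-bit architecture with 4 KiB
 * pages, which is not guaranteed everywhere): the batch header is
 * 8 + 4 + 4 = 16 bytes, so MAX_GATHER_BATCH = (4096 - 16) / 8 = 510 pages
 * per batch, and MAX_GATHER_BATCH_COUNT = 10000 / 510 = 19 batches, i.e.
 * roughly 9.7K queued pages before a flush is forced.
 */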
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
                                   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
        struct mm_struct        *mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
        struct mmu_table_batch  *batch;
#endif

        unsigned long           start;
        unsigned long           end;
        /*
         * we are in the middle of an operation to clear
         * a full mm and can make some optimizations
         */
        unsigned int            fullmm : 1;

        /*
         * we have performed an operation which
         * requires a complete flush of the tlb
         */
        unsigned int            need_flush_all : 1;

        /*
         * we have removed page directories
         */
        unsigned int            freed_tables : 1;

        /*
         * at which levels have we cleared entries?
         */
        unsigned int            cleared_ptes : 1;
        unsigned int            cleared_pmds : 1;
        unsigned int            cleared_puds : 1;
        unsigned int            cleared_p4ds : 1;

        /*
         * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
         */
        unsigned int            vma_exec : 1;
        unsigned int            vma_huge : 1;
        unsigned int            vma_pfn  : 1;

        unsigned int            batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
        struct page             *__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
        unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                      unsigned long address,
                                      unsigned int range_size)
{
        tlb->start = min(tlb->start, address);
        tlb->end = max(tlb->end, address + range_size);
}
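
/*
 * For example (a hypothetical sequence, only to illustrate the min/max
 * accumulation above): starting from a reset range of start = TASK_SIZE,
 * end = 0, adjusting with (0x1000, PAGE_SIZE) and then (0x5000, PAGE_SIZE)
 * leaves tlb->start = 0x1000 and tlb->end = 0x6000, i.e. a single flush
 * covering both pages and the hole between them.
 */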
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
        if (tlb->fullmm) {
                tlb->start = tlb->end = ~0;
        } else {
                tlb->start = TASK_SIZE;
                tlb->end = 0;
        }
        tlb->freed_tables = 0;
        tlb->cleared_ptes = 0;
        tlb->cleared_pmds = 0;
        tlb->cleared_puds = 0;
        tlb->cleared_p4ds = 0;
        /*
         * Do not reset mmu_gather::vma_* fields here, we do not
         * call into tlb_start_vma() again to set them if there is an
         * intermediate flush.
         */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (tlb->end)
                flush_tlb_mm(tlb->mm);
}

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (tlb->fullmm || tlb->need_flush_all) {
                flush_tlb_mm(tlb->mm);
        } else if (tlb->end) {
                struct vm_area_struct vma = {
                        .vm_mm = tlb->mm,
                        .vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
                                    (tlb->vma_huge ? VM_HUGETLB : 0),
                };

                flush_tlb_range(&vma, tlb->start, tlb->end);
        }
}
#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        /*
         * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
         * mips-4k) flush only large pages.
         *
         * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
         * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
         * range.
         *
         * We rely on tlb_end_vma() to issue a flush, such that when we reset
         * these values the batch is empty.
         */
        tlb->vma_huge = is_vm_hugetlb_page(vma);
        tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
        tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        /*
         * Anything calling __tlb_adjust_range() also sets at least one of
         * these bits.
         */
        if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
              tlb->cleared_puds || tlb->cleared_p4ds))
                return;

        tlb_flush(tlb);
        mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
        __tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        if (__tlb_remove_page_size(tlb, page, page_size))
                tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *      Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *      required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
                                        unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
        if (tlb->page_size && tlb->page_size != page_size) {
                if (!tlb->fullmm && !tlb->need_flush_all)
                        tlb_flush_mmu(tlb);
        }

        tlb->page_size = page_size;
#endif
}
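
/*
 * Typical caller pattern (a sketch, modeled on how the zap paths use this):
 * declare the page size you are about to operate on before queueing pages,
 * so that a pending batch of a different size is flushed first:
 *
 *      tlb_change_page_size(tlb, PAGE_SIZE);
 *      ...
 *      __tlb_remove_page(tlb, page);
 */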
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
        if (tlb->cleared_ptes)
                return PAGE_SHIFT;
        if (tlb->cleared_pmds)
                return PMD_SHIFT;
        if (tlb->cleared_puds)
                return PUD_SHIFT;
        if (tlb->cleared_p4ds)
                return P4D_SHIFT;

        return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
        return 1UL << tlb_get_unmap_shift(tlb);
}
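
/*
 * Worked example (x86-64 values with 4 KiB pages, as an illustration only):
 * if a zap cleared only PMD-level entries, tlb_get_unmap_shift() returns
 * PMD_SHIFT (21) and tlb_get_unmap_size() returns 1UL << 21 = 2 MiB, which
 * lets an architecture pick a 2 MiB-granule invalidation instead of
 * per-4KiB-page flushes.
 */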
/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (tlb->fullmm)
                return;

        tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
        flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (tlb->fullmm)
                return;

        /*
         * VM_PFNMAP is more fragile because the core mm will not track the
         * page mapcount -- there might not be page-frames for these PFNs
         * after all.  Force flush TLBs for such ranges to avoid munmap() vs
         * unmap_mapping_range() races.
         */
        if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
                /*
                 * Do a TLB flush and reset the range at VMA boundaries; this
                 * avoids the ranges growing with the unused space between
                 * consecutive VMAs.
                 */
                tlb_flush_mmu_tlbonly(tlb);
        }
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
                                       unsigned long address, unsigned long size)
{
        __tlb_adjust_range(tlb, address, size);
        tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
                                       unsigned long address, unsigned long size)
{
        __tlb_adjust_range(tlb, address, size);
        tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
                                       unsigned long address, unsigned long size)
{
        __tlb_adjust_range(tlb, address, size);
        tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
                                       unsigned long address, unsigned long size)
{
        __tlb_adjust_range(tlb, address, size);
        tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)                \
        do {                                                    \
                tlb_flush_pte_range(tlb, address, PAGE_SIZE);   \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
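
/*
 * Usage sketch (illustrative, loosely based on how zap_pte_range() drives
 * this macro; the loop variables are hypothetical):
 *
 *      for (; addr != end; pte++, addr += PAGE_SIZE) {
 *              pte_t ptent = ptep_get_and_clear_full(mm, addr, pte,
 *                                                    tlb->fullmm);
 *              tlb_remove_tlb_entry(tlb, pte, addr);
 *      }
 */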
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)        \
        do {                                                    \
                unsigned long _sz = huge_page_size(h);          \
                if (_sz >= P4D_SIZE)                            \
                        tlb_flush_p4d_range(tlb, address, _sz); \
                else if (_sz >= PUD_SIZE)                       \
                        tlb_flush_pud_range(tlb, address, _sz); \
                else if (_sz >= PMD_SIZE)                       \
                        tlb_flush_pmd_range(tlb, address, _sz); \
                else                                            \
                        tlb_flush_pte_range(tlb, address, _sz); \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)                    \
        do {                                                            \
                tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);      \
                __tlb_remove_pmd_tlb_entry(tlb, pmdp, address);         \
        } while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)                    \
        do {                                                            \
                tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);      \
                __tlb_remove_pud_tlb_entry(tlb, pudp, address);         \
        } while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it.  This
 * is definitely how x86 works, for example.  And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture that would want something that odd,
 * I think it is up to that architecture to do its own odd thing, not
 * cause pain for others:
 *
 *   http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)                        \
        do {                                                    \
                tlb_flush_pmd_range(tlb, address, PAGE_SIZE);   \
                tlb->freed_tables = 1;                          \
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)
#endif
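
/*
 * Note the level shift above: freeing a PTE page means a PMD entry was
 * cleared, so the flush is recorded one level up.  As a sketch of what the
 * architecture-side hook can look like (modeled loosely on arm64 with
 * MMU_GATHER_TABLE_FREE; not a requirement of this header), an arch might
 * provide:
 *
 *      static inline void __pte_free_tlb(struct mmu_gather *tlb,
 *                                        pgtable_t pte, unsigned long addr)
 *      {
 *              pgtable_pte_page_dtor(pte);
 *              tlb_remove_table(tlb, pte);
 *      }
 */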
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)                        \
        do {                                                    \
                tlb_flush_pud_range(tlb, address, PAGE_SIZE);   \
                tlb->freed_tables = 1;                          \
                __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)                        \
        do {                                                    \
                tlb_flush_p4d_range(tlb, address, PAGE_SIZE);   \
                tlb->freed_tables = 1;                          \
                __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)                        \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                tlb->freed_tables = 1;                          \
                __p4d_free_tlb(tlb, pudp, address);             \
        } while (0)
#endif
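
/*
 * The stubs below let generic code ask whether a PTE/PMD change actually
 * requires a TLB flush; architectures that can tell (x86, for instance,
 * overrides these) provide their own versions, everyone else
 * conservatively flushes.
 */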
#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
        return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
        return true;
}
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */