kfence.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KFENCE support.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

#ifndef MODULE

#include <linux/bug.h>
#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/* Force 4K pages for __kfence_pool. */
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		unsigned int level;

		if (!lookup_address(addr, &level))
			return false;

		if (level != PG_LEVEL_4K)
			set_memory_4k(addr, 1);
	}

	return true;
}
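/*
 * Note: KFENCE needs page-granularity mappings because kfence_protect_page()
 * below toggles _PAGE_PRESENT on a single PTE. If the pool were covered by a
 * 2M or 1G mapping, protecting one guard page would take the whole huge page
 * offline, hence the set_memory_4k() split above.
 */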
/* Protect the given page and flush TLB. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (WARN_ON(!pte || level != PG_LEVEL_4K))
		return false;

	/*
	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
	 * with interrupts disabled. Therefore, the below is best-effort, and
	 * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
	 * lazy fault handling takes care of faults after the page is PRESENT.
	 */
	if (protect)
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	else
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));

	/*
	 * Flush this CPU's TLB, assuming whoever did the allocation/free is
	 * likely to continue running on this CPU.
	 */
	preempt_disable();
	flush_tlb_one_kernel(addr);
	preempt_enable();

	return true;
}

#endif /* !MODULE */

#endif /* _ASM_X86_KFENCE_H */
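
For context, here is a minimal sketch of how the architecture-independent KFENCE core might consume these two hooks at pool setup time. The function name kfence_setup_pool_sketch and the every-other-page guard layout are illustrative assumptions, not the actual mm/kfence/core.c implementation.

/*
 * Illustrative sketch only: how generic KFENCE code could drive the x86
 * hooks above. kfence_setup_pool_sketch() is a hypothetical name; the real
 * logic lives in mm/kfence/core.c.
 */
static bool kfence_setup_pool_sketch(void)
{
	unsigned long addr;

	/* Split any huge mappings so each pool page has its own 4K PTE. */
	if (!arch_kfence_init_pool())
		return false;

	/*
	 * Assume guard pages alternate with object pages: clearing
	 * _PAGE_PRESENT on every other page makes an out-of-bounds access
	 * from a neighboring object fault immediately.
	 */
	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += 2 * PAGE_SIZE)
		kfence_protect_page(addr, true);

	return true;
}

Freeing an object would go the other way: the core calls kfence_protect_page(addr, true) to take the page out of service so use-after-free accesses trap, and the best-effort, local-only TLB flush is what makes that safe even with interrupts disabled.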