kmsan.h 2.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * x86 KMSAN support.
  4. *
  5. * Copyright (C) 2022, Google LLC
  6. * Author: Alexander Potapenko <[email protected]>
  7. */
  8. #ifndef _ASM_X86_KMSAN_H
  9. #define _ASM_X86_KMSAN_H
  10. #ifndef MODULE
  11. #include <asm/cpu_entry_area.h>
  12. #include <asm/processor.h>
  13. #include <linux/mmzone.h>
  14. DECLARE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_shadow);
  15. DECLARE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_origin);
  16. /*
  17. * Functions below are declared in the header to make sure they are inlined.
  18. * They all are called from kmsan_get_metadata() for every memory access in
  19. * the kernel, so speed is important here.
  20. */
  21. /*
  22. * Compute metadata addresses for the CPU entry area on x86.
  23. */
  24. static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
  25. {
  26. unsigned long addr64 = (unsigned long)addr;
  27. char *metadata_array;
  28. unsigned long off;
  29. int cpu;
  30. if ((addr64 < CPU_ENTRY_AREA_BASE) ||
  31. (addr64 >= (CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE)))
  32. return NULL;
  33. cpu = (addr64 - CPU_ENTRY_AREA_BASE) / CPU_ENTRY_AREA_SIZE;
  34. off = addr64 - (unsigned long)get_cpu_entry_area(cpu);
  35. if ((off < 0) || (off >= CPU_ENTRY_AREA_SIZE))
  36. return NULL;
  37. metadata_array = is_origin ? cpu_entry_area_origin :
  38. cpu_entry_area_shadow;
  39. return &per_cpu(metadata_array[off], cpu);
  40. }
  41. /*
  42. * Taken from arch/x86/mm/physaddr.h to avoid using an instrumented version.
  43. */
  44. static inline bool kmsan_phys_addr_valid(unsigned long addr)
  45. {
  46. if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
  47. return !(addr >> boot_cpu_data.x86_phys_bits);
  48. else
  49. return true;
  50. }
/*
 * Taken from arch/x86/mm/physaddr.c to avoid using an instrumented version.
 *
 * Returns true iff @addr is a valid kernel virtual address backed by a
 * valid page frame. Mirrors __virt_addr_valid(): addresses in the kernel
 * image mapping (>= __START_KERNEL_map) are translated via phys_base,
 * while direct-map addresses are translated via PAGE_OFFSET. The unsigned
 * subtractions below deliberately rely on wraparound (the "carry flag")
 * to perform the range comparisons, so the statement order matters.
 */
static inline bool kmsan_virt_addr_valid(void *addr)
{
	unsigned long x = (unsigned long)addr;
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	if (unlikely(x > y)) {
		/* Kernel image mapping: physical = offset + phys_base. */
		x = y + phys_base;

		if (y >= KERNEL_IMAGE_SIZE)
			return false;
	} else {
		/* Direct mapping: shift offset base from PAGE_OFFSET. */
		x = y + (__START_KERNEL_map - PAGE_OFFSET);

		/* carry flag will be set if starting x was >= PAGE_OFFSET */
		if ((x > y) || !kmsan_phys_addr_valid(x))
			return false;
	}

	/* x now holds the physical address; check its page frame. */
	return pfn_valid(x >> PAGE_SHIFT);
}
  71. #endif /* !MODULE */
  72. #endif /* _ASM_X86_KMSAN_H */