/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */
#include <linux/memblock.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
  17. void __init kasan_early_init(void)
  18. {
  19. unsigned long vaddr = KASAN_SHADOW_START;
  20. pmd_t *pmd = pmd_off_k(vaddr);
  21. int i;
  22. for (i = 0; i < PTRS_PER_PTE; ++i)
  23. set_pte(kasan_early_shadow_pte + i,
  24. mk_pte(virt_to_page(kasan_early_shadow_page),
  25. PAGE_KERNEL));
  26. for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
  27. BUG_ON(!pmd_none(*pmd));
  28. set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
  29. }
  30. early_trap_init();
  31. }
  32. static void __init populate(void *start, void *end)
  33. {
  34. unsigned long n_pages = (end - start) / PAGE_SIZE;
  35. unsigned long n_pmds = n_pages / PTRS_PER_PTE;
  36. unsigned long i, j;
  37. unsigned long vaddr = (unsigned long)start;
  38. pmd_t *pmd = pmd_off_k(vaddr);
  39. pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
  40. if (!pte)
  41. panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
  42. __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
  43. pr_debug("%s: %p - %p\n", __func__, start, end);
  44. for (i = j = 0; i < n_pmds; ++i) {
  45. int k;
  46. for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
  47. phys_addr_t phys =
  48. memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
  49. 0,
  50. MEMBLOCK_ALLOC_ANYWHERE);
  51. if (!phys)
  52. panic("Failed to allocate page table page\n");
  53. set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
  54. }
  55. }
  56. for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
  57. set_pmd(pmd + i, __pmd((unsigned long)pte));
  58. local_flush_tlb_all();
  59. memset(start, 0, end - start);
  60. }
  61. void __init kasan_init(void)
  62. {
  63. int i;
  64. BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
  65. (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
  66. BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
  67. /*
  68. * Replace shadow map pages that cover addresses from VMALLOC area
  69. * start to the end of KSEG with clean writable pages.
  70. */
  71. populate(kasan_mem_to_shadow((void *)VMALLOC_START),
  72. kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
  73. /*
  74. * Write protect kasan_early_shadow_page and zero-initialize it again.
  75. */
  76. for (i = 0; i < PTRS_PER_PTE; ++i)
  77. set_pte(kasan_early_shadow_pte + i,
  78. mk_pte(virt_to_page(kasan_early_shadow_page),
  79. PAGE_KERNEL_RO));
  80. local_flush_tlb_all();
  81. memset(kasan_early_shadow_page, 0, PAGE_SIZE);
  82. /* At this point kasan is fully initialized. Enable error messages. */
  83. current->kasan_depth = 0;
  84. pr_info("KernelAddressSanitizer initialized\n");
  85. }