// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Modifications by Paul Mackerras (PowerMac) ([email protected])
 * and Cort Dougan (PReP) ([email protected])
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <[email protected]>
 * Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/pgalloc.h>
#include <asm/kup.h>
#include <asm/smp.h>
/*
 * Physical address of the start of system memory.  Initialized to ~0
 * ("not yet known"); presumably filled in by early boot code outside
 * this file — TODO confirm against platform setup.
 */
phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
EXPORT_SYMBOL_GPL(memstart_addr);

/* Physical address the kernel image starts at (0 until set elsewhere). */
phys_addr_t kernstart_addr __ro_after_init;
EXPORT_SYMBOL_GPL(kernstart_addr);

/* Virtual address the kernel runs at; defaults to KERNELBASE. */
unsigned long kernstart_virt_addr __ro_after_init = KERNELBASE;
EXPORT_SYMBOL_GPL(kernstart_virt_addr);

/*
 * Runtime switches for Kernel Userspace Execution/Access Protection.
 * Default to "disabled" when the corresponding config option is off;
 * can also be forced on via the nosmep/nosmap parameters below.
 */
bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
  30. static int __init parse_nosmep(char *p)
  31. {
  32. if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
  33. return 0;
  34. disable_kuep = true;
  35. pr_warn("Disabling Kernel Userspace Execution Prevention\n");
  36. return 0;
  37. }
  38. early_param("nosmep", parse_nosmep);
  39. static int __init parse_nosmap(char *p)
  40. {
  41. disable_kuap = true;
  42. pr_warn("Disabling Kernel Userspace Access Protection\n");
  43. return 0;
  44. }
  45. early_param("nosmap", parse_nosmap);
  46. void __weak setup_kuep(bool disabled)
  47. {
  48. if (!IS_ENABLED(CONFIG_PPC_KUEP) || disabled)
  49. return;
  50. if (smp_processor_id() != boot_cpuid)
  51. return;
  52. pr_info("Activating Kernel Userspace Execution Prevention\n");
  53. }
/*
 * Activate Kernel Userspace protections (KUAP, then KUEP), honouring
 * the "nosmap"/"nosmep" command line overrides recorded in
 * disable_kuap/disable_kuep above.
 */
void setup_kup(void)
{
	setup_kuap(disable_kuap);
	setup_kuep(disable_kuep);
}
/*
 * Generate the kmem_cache constructors ctor_0 .. ctor_15.  ctor_N
 * zeroes a table of 2^N pointer-sized entries, i.e.
 * sizeof(void *) << N bytes.
 */
#define CTOR(shift) static void ctor_##shift(void *addr) \
{ \
	memset(addr, 0, sizeof(void *) << (shift)); \
}

CTOR(0); CTOR(1); CTOR(2); CTOR(3); CTOR(4); CTOR(5); CTOR(6); CTOR(7);
CTOR(8); CTOR(9); CTOR(10); CTOR(11); CTOR(12); CTOR(13); CTOR(14); CTOR(15);
  65. static inline void (*ctor(int shift))(void *)
  66. {
  67. BUILD_BUG_ON(MAX_PGTABLE_INDEX_SIZE != 15);
  68. switch (shift) {
  69. case 0: return ctor_0;
  70. case 1: return ctor_1;
  71. case 2: return ctor_2;
  72. case 3: return ctor_3;
  73. case 4: return ctor_4;
  74. case 5: return ctor_5;
  75. case 6: return ctor_6;
  76. case 7: return ctor_7;
  77. case 8: return ctor_8;
  78. case 9: return ctor_9;
  79. case 10: return ctor_10;
  80. case 11: return ctor_11;
  81. case 12: return ctor_12;
  82. case 13: return ctor_13;
  83. case 14: return ctor_14;
  84. case 15: return ctor_15;
  85. }
  86. return NULL;
  87. }
/* One kmem_cache per page table index size, indexed by shift. */
struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1];
EXPORT_SYMBOL_GPL(pgtable_cache); /* used by kvm_hv module */
  90. /*
  91. * Create a kmem_cache() for pagetables. This is not used for PTE
  92. * pages - they're linked to struct page, come from the normal free
  93. * pages pool and have a different entry size (see real_pte_t) to
  94. * everything else. Caches created by this function are used for all
  95. * the higher level pagetables, and for hugepage pagetables.
  96. */
  97. void pgtable_cache_add(unsigned int shift)
  98. {
  99. char *name;
  100. unsigned long table_size = sizeof(void *) << shift;
  101. unsigned long align = table_size;
  102. /* When batching pgtable pointers for RCU freeing, we store
  103. * the index size in the low bits. Table alignment must be
  104. * big enough to fit it.
  105. *
  106. * Likewise, hugeapge pagetable pointers contain a (different)
  107. * shift value in the low bits. All tables must be aligned so
  108. * as to leave enough 0 bits in the address to contain it. */
  109. unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
  110. HUGEPD_SHIFT_MASK + 1);
  111. struct kmem_cache *new;
  112. /* It would be nice if this was a BUILD_BUG_ON(), but at the
  113. * moment, gcc doesn't seem to recognize is_power_of_2 as a
  114. * constant expression, so so much for that. */
  115. BUG_ON(!is_power_of_2(minalign));
  116. BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
  117. if (PGT_CACHE(shift))
  118. return; /* Already have a cache of this size */
  119. align = max_t(unsigned long, align, minalign);
  120. name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
  121. new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
  122. if (!new)
  123. panic("Could not allocate pgtable cache for order %d", shift);
  124. kfree(name);
  125. pgtable_cache[shift] = new;
  126. pr_debug("Allocated pgtable cache for order %d\n", shift);
  127. }
  128. EXPORT_SYMBOL_GPL(pgtable_cache_add); /* used by kvm_hv module */
/*
 * Create the page table caches needed by this configuration.  A cache
 * for a given level is only added when its index is non-zero (folded
 * levels report a zero index); pgtable_cache_add() itself skips sizes
 * that already have a cache.
 */
void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE);

	if (PMD_CACHE_INDEX)
		pgtable_cache_add(PMD_CACHE_INDEX);
	/*
	 * In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index except with THP enabled
	 * on book3s 64
	 */
	if (PUD_CACHE_INDEX)
		pgtable_cache_add(PUD_CACHE_INDEX);
}