40x.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 4xx series of chips.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 *  Modifications by Paul Mackerras (PowerMac) ([email protected])
 *  and Cort Dougan (PReP) ([email protected])
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>

#include <mm/mmu_decl.h>

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the 4xx's zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB.  For user access, zone 1 is used,
	 * for kernel access, zone 0 is used.  We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE.  This also allows
	 * kernel access as indicated in the PTE.
	 */
	mtspr(SPRN_ZPR, 0x10000000);
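	/*
	 * The value 0x10000000 follows from the ZPR layout: sixteen 2-bit
	 * zone fields with zone 0 in the two most-significant bits, so
	 * zone 1 is set to 0b01 and every other zone to 0b00.
	 */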
	flush_instruction_cache();

	/*
	 * Set up the real-mode cache parameters for the exception vector
	 * handlers (which are run in real-mode).
	 */
	mtspr(SPRN_DCWR, 0x00000000);	/* All caching is write-back */

	/*
	 * Cache instruction and data space where the exception
	 * vectors and the kernel live in real-mode.
	 */
	mtspr(SPRN_DCCR, 0xFFFF0000);	/* 2GByte of data space at 0x0. */
	mtspr(SPRN_ICCR, 0xFFFF0000);	/* 2GByte of instr. space at 0x0. */
}

#define LARGE_PAGE_SIZE_16M	(1<<24)
#define LARGE_PAGE_SIZE_4M	(1<<22)

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long v, s, mapped;
	phys_addr_t p;

	v = KERNELBASE;
	p = 0;
	s = total_lowmem;
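	/*
	 * KFENCE, DEBUG_PAGEALLOC and STRICT_KERNEL_RWX all need to change
	 * protections at page granularity, which the block mappings below
	 * cannot provide, so map nothing here and let all of lowmem be
	 * covered with normal-sized pages instead.
	 */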
	if (IS_ENABLED(CONFIG_KFENCE))
		return 0;

	if (debug_pagealloc_enabled())
		return 0;

	if (strict_kernel_rwx_enabled())
		return 0;

	while (s >= LARGE_PAGE_SIZE_16M) {
		pmd_t *pmdp;
		unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW;

		pmdp = pmd_off_k(v);
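		/*
		 * With 4KB base pages each PMD entry spans 4MB, so the 16MB
		 * large page is replicated into the four consecutive entries
		 * that cover it.
		 */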
		*pmdp++ = __pmd(val);
		*pmdp++ = __pmd(val);
		*pmdp++ = __pmd(val);
		*pmdp++ = __pmd(val);

		v += LARGE_PAGE_SIZE_16M;
		p += LARGE_PAGE_SIZE_16M;
		s -= LARGE_PAGE_SIZE_16M;
	}

	while (s >= LARGE_PAGE_SIZE_4M) {
		pmd_t *pmdp;
		unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW;

		pmdp = pmd_off_k(v);
		*pmdp = __pmd(val);

		v += LARGE_PAGE_SIZE_4M;
		p += LARGE_PAGE_SIZE_4M;
		s -= LARGE_PAGE_SIZE_4M;
	}

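	/*
	 * Worked example: with 35MB of lowmem the first loop maps two 16MB
	 * pages (32MB) and the remaining 3MB is too small for a 4MB page,
	 * so mapped ends up as 32MB and the tail is left for normal-sized
	 * pages.
	 */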
	mapped = total_lowmem - s;

	/* If the size of RAM is not an exact power of two, we may not
	 * have covered RAM in its entirety with 16 and 4 MiB
	 * pages.  Consequently, restrict the top end of RAM currently
	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
	 * coverage with normal-sized pages (or other reasons) do not
	 * attempt to allocate outside the allowed range.
	 */
	memblock_set_current_limit(mapped);

	return mapped;
}
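
/*
 * Note: the caller, mapin_ram(), treats the value returned above as the
 * base of the still-unmapped region and covers the remaining lowmem with
 * normal-sized pages.
 */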
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 40x can only access 16MB at the moment (see head_40x.S);
	 * note the limit applied below (0x00800000) is 8MB.
	 */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
}