e500.c 9.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Modifications by Kumar Gala ([email protected]) to support
  4. * E500 Book E processors.
  5. *
  6. * Copyright 2004,2010 Freescale Semiconductor, Inc.
  7. *
  8. * This file contains the routines for initializing the MMU
  9. * on the 4xx series of chips.
  10. * -- paulus
  11. *
  12. * Derived from arch/ppc/mm/init.c:
  13. * Copyright (C) 1995-1996 Gary Thomas ([email protected])
  14. *
  15. * Modifications by Paul Mackerras (PowerMac) ([email protected])
  16. * and Cort Dougan (PReP) ([email protected])
  17. * Copyright (C) 1996 Paul Mackerras
  18. *
  19. * Derived from "arch/i386/mm/init.c"
  20. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  21. */
  22. #include <linux/signal.h>
  23. #include <linux/sched.h>
  24. #include <linux/kernel.h>
  25. #include <linux/errno.h>
  26. #include <linux/string.h>
  27. #include <linux/types.h>
  28. #include <linux/ptrace.h>
  29. #include <linux/mman.h>
  30. #include <linux/mm.h>
  31. #include <linux/swap.h>
  32. #include <linux/stddef.h>
  33. #include <linux/vmalloc.h>
  34. #include <linux/init.h>
  35. #include <linux/delay.h>
  36. #include <linux/highmem.h>
  37. #include <linux/memblock.h>
  38. #include <linux/of_fdt.h>
  39. #include <asm/io.h>
  40. #include <asm/mmu_context.h>
  41. #include <asm/mmu.h>
  42. #include <linux/uaccess.h>
  43. #include <asm/smp.h>
  44. #include <asm/machdep.h>
  45. #include <asm/setup.h>
  46. #include <asm/paca.h>
  47. #include <mm/mmu_decl.h>
/* Number of CAM (TLB1) entries currently in use for the lowmem mapping. */
unsigned int tlbcam_index;

/* Shadow copies of the MAS register values programmed into TLB1. */
struct tlbcam TLBCAM[NUM_TLBCAMS];

/*
 * Bookkeeping for each CAM entry so that v_block_mapped()/p_block_mapped()
 * can translate between virtual and physical addresses without touching
 * the hardware.  'limit' is set to start + size - 1 by settlbcam().
 */
static struct {
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} tlbcam_addrs[NUM_TLBCAMS];
  55. #ifdef CONFIG_PPC_85xx
  56. /*
  57. * Return PA for this VA if it is mapped by a CAM, or 0
  58. */
  59. phys_addr_t v_block_mapped(unsigned long va)
  60. {
  61. int b;
  62. for (b = 0; b < tlbcam_index; ++b)
  63. if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
  64. return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
  65. return 0;
  66. }
  67. /*
  68. * Return VA for a given PA or 0 if not mapped
  69. */
  70. unsigned long p_block_mapped(phys_addr_t pa)
  71. {
  72. int b;
  73. for (b = 0; b < tlbcam_index; ++b)
  74. if (pa >= tlbcam_addrs[b].phys
  75. && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
  76. +tlbcam_addrs[b].phys)
  77. return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
  78. return 0;
  79. }
  80. #endif
/*
 * Set up a variable-size TLB entry (tlbcam). The parameters are not checked;
 * in particular size must be a power of 4 between 4k and the max supported by
 * an implementation; max may further be limited by what can be represented in
 * an unsigned long (for example, 32-bit implementations cannot support a 4GB
 * size).
 *
 * Only the in-memory shadow (TLBCAM[] and tlbcam_addrs[]) is written here;
 * the entry reaches the hardware when loadcam_multi() is called later.
 */
static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
		unsigned long size, unsigned long flags, unsigned int pid)
{
	unsigned int tsize;

	/* MAS1 TSIZE field: log2 of the size in KB (size is a power of 2) */
	tsize = __ilog2(size) - 10;

#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
	/* Cacheable mappings must also be coherent on SMP / e500mc */
	if ((flags & _PAGE_NO_CACHE) == 0)
		flags |= _PAGE_COHERENT;
#endif

	/* Select TLB1, entry 'index'; NV points at the next entry */
	TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
	/* Valid and IPROT (protected from broadcast invalidations) */
	TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
	/* Effective page number plus the WIMGE attribute bits from 'flags' */
	TLBCAM[index].MAS2 = virt & PAGE_MASK;
	TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;

	/* Real page number; supervisor read always, write only if _PAGE_RW */
	TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
	TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
		/* Bits above 32 of the physical address go in MAS7 */
		TLBCAM[index].MAS7 = (u64)phys >> 32;

	/* Below is unlikely -- only for large user pages or similar */
	if (pte_user(__pte(flags))) {
		TLBCAM[index].MAS3 |= MAS3_UR;
		TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
		TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
	} else {
		TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
	}

	/* Record the range for v_block_mapped()/p_block_mapped() lookups */
	tlbcam_addrs[index].start = virt;
	tlbcam_addrs[index].limit = virt + size - 1;
	tlbcam_addrs[index].phys = phys;
}
/*
 * Compute the largest CAM entry size (in bytes) usable to map part of a
 * 'ram'-byte region at virt/phys.  The result is bounded by the common
 * alignment of the two addresses and by the maximum page size the MMU
 * implementation reports.
 */
static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
		phys_addr_t phys)
{
	unsigned int camsize = __ilog2(ram);
	unsigned int align = __ffs(virt | phys);
	unsigned long max_cam;

	if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		/* Convert (4^max) kB to (2^max) bytes */
		max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10;
		/* MAV 1.0 only supports power-of-4 sizes: force even log2 */
		camsize &= ~1U;
		align &= ~1U;
	} else {
		/* Convert (2^max) kB to (2^max) bytes */
		max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10;
	}

	/* Clamp to the alignment and to the hardware maximum */
	if (camsize > align)
		camsize = align;
	if (camsize > max_cam)
		camsize = max_cam;

	return 1UL << camsize;
}
/*
 * Map 'ram' bytes at phys/virt using at most 'max_cam_idx' CAM entries and
 * return the number of bytes actually mapped.  When strict kernel RWX is
 * enabled, the region up to _sinittext gets executable (text) protections
 * and the remainder gets data protections.  With 'dryrun' the hardware and
 * the shadow arrays are left untouched (only the achievable size is
 * computed); with 'init' the entries use the boot-time protections and
 * tlbcam_index is (re)established.
 */
static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
		unsigned long ram, int max_cam_idx,
		bool dryrun, bool init)
{
	int i;
	unsigned long amount_mapped = 0;
	unsigned long boundary;

	/* First phase covers kernel text (up to _sinittext) when RWX applies */
	if (strict_kernel_rwx_enabled())
		boundary = (unsigned long)(_sinittext - _stext);
	else
		boundary = ram;

	/* Calculate CAM values */
	for (i = 0; boundary && i < max_cam_idx; i++) {
		unsigned long cam_sz;
		pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX;

		cam_sz = calc_cam_sz(boundary, virt, phys);
		if (!dryrun)
			settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);

		boundary -= cam_sz;
		amount_mapped += cam_sz;
		virt += cam_sz;
		phys += cam_sz;
	}
	/* Second phase: the remainder of RAM with (writable) data protections */
	for (ram -= amount_mapped; ram && i < max_cam_idx; i++) {
		unsigned long cam_sz;
		pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL;

		cam_sz = calc_cam_sz(ram, virt, phys);
		if (!dryrun)
			settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);

		ram -= cam_sz;
		amount_mapped += cam_sz;
		virt += cam_sz;
		phys += cam_sz;
	}

	if (dryrun)
		return amount_mapped;

	if (init) {
		loadcam_multi(0, i, max_cam_idx);
		tlbcam_index = i;
	} else {
		/* Re-load over the existing entries; must not need more of them */
		loadcam_multi(0, i, 0);
		WARN_ON(i > tlbcam_index);
	}

#ifdef CONFIG_PPC64
	get_paca()->tcd.esel_next = i;
	get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
	get_paca()->tcd.esel_first = i;
#endif

	return amount_mapped;
}
  192. unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init)
  193. {
  194. unsigned long virt = PAGE_OFFSET;
  195. phys_addr_t phys = memstart_addr;
  196. return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init);
  197. }
  198. #ifdef CONFIG_PPC32
  199. #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
  200. #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
  201. #endif
  202. unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
  203. {
  204. return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
  205. }
/*
 * Invalidate the instruction cache by setting the flash-invalidate and
 * lock-flash-clear bits in L1CSR1, then synchronize with isync.
 */
void flush_instruction_cache(void)
{
	unsigned long tmp;

	tmp = mfspr(SPRN_L1CSR1);
	tmp |= L1CSR1_ICFI | L1CSR1_ICLFR;
	mtspr(SPRN_L1CSR1, tmp);
	isync();
}
/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 * On e500 the CAM entries are set up elsewhere (adjust_total_lowmem());
 * all that is needed here is a clean instruction cache.
 */
void __init MMU_init_hw(void)
{
	flush_instruction_cache();
}
  221. static unsigned long __init tlbcam_sz(int idx)
  222. {
  223. return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
  224. }
/*
 * Establish the boot-time CAM mapping of lowmem and shrink __max_low_memory
 * to whatever the CAM entries could actually cover, then report the layout
 * and cap memblock allocations to the mapped region.
 */
void __init adjust_total_lowmem(void)
{
	unsigned long ram;
	int i;

	/* adjust lowmem size to __max_low_memory */
	ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);

	/* Switch to AS1 so the AS0 TLB entries can be safely replaced */
	i = switch_to_as1();
	__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
	restore_to_as0(i, 0, NULL, 1);

	/* Print each CAM entry's size in MB plus the unmapped residual */
	pr_info("Memory CAM mapping: ");
	for (i = 0; i < tlbcam_index - 1; i++)
		pr_cont("%lu/", tlbcam_sz(i) >> 20);
	pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
		(unsigned int)((total_lowmem - __max_low_memory) >> 20));

	memblock_set_current_limit(memstart_addr + __max_low_memory);
}
  241. #ifdef CONFIG_STRICT_KERNEL_RWX
  242. void mmu_mark_rodata_ro(void)
  243. {
  244. unsigned long remapped;
  245. remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
  246. WARN_ON(__max_low_memory != remapped);
  247. }
  248. #endif
/* Intentionally empty: the CAM remap happens in one shot later. */
void mmu_mark_initmem_nx(void)
{
	/* Everything is done in mmu_mark_rodata_ro() */
}
  253. void setup_initial_memory_limit(phys_addr_t first_memblock_base,
  254. phys_addr_t first_memblock_size)
  255. {
  256. phys_addr_t limit = first_memblock_base + first_memblock_size;
  257. /* 64M mapped initially according to head_fsl_booke.S */
  258. memblock_set_current_limit(min_t(u64, limit, 0x04000000));
  259. }
#ifdef CONFIG_RELOCATABLE
/* Nonzero once the first relocation pass has run and a second is pending. */
int __initdata is_second_reloc;

/*
 * Early entry for relocatable kernels: establish virt_phys_offset for the
 * current load address and, if PAGE_OFFSET does not yet map memstart_addr,
 * set up a temporary 64M mapping and trigger a second relocation pass.
 */
notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
{
	unsigned long base = kernstart_virt_addr;
	phys_addr_t size;

	kernstart_addr = start;
	if (is_second_reloc) {
		/* Second pass: memstart_addr is now known and final */
		virt_phys_offset = PAGE_OFFSET - memstart_addr;
		kaslr_late_init();
		return;
	}

	/*
	 * Relocatable kernel support based on processing of dynamic
	 * relocation entries. Before we get the real memstart_addr,
	 * We will compute the virt_phys_offset like this:
	 * virt_phys_offset = stext.run - kernstart_addr
	 *
	 * stext.run = (KERNELBASE & ~0x3ffffff) +
	 * (kernstart_addr & 0x3ffffff)
	 * When we relocate, we have :
	 *
	 * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
	 *
	 * hence:
	 * virt_phys_offset = (KERNELBASE & ~0x3ffffff) -
	 * (kernstart_addr & ~0x3ffffff)
	 *
	 */
	start &= ~0x3ffffff;
	base &= ~0x3ffffff;
	virt_phys_offset = base - start;
	early_get_first_memblock_info(__va(dt_ptr), &size);
	/*
	 * We now get the memstart_addr, then we should check if this
	 * address is the same as what the PAGE_OFFSET map to now. If
	 * not we have to change the map of PAGE_OFFSET to memstart_addr
	 * and do a second relocation.
	 */
	if (start != memstart_addr) {
		int n;
		long offset = start - memstart_addr;

		is_second_reloc = 1;
		n = switch_to_as1();
		/* map a 64M area for the second relocation */
		if (memstart_addr > start)
			map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
					false, true);
		else
			map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
					0x4000000, CONFIG_LOWMEM_CAM_NUM,
					false, true);
		/* restore_to_as0() jumps into the relocated kernel */
		restore_to_as0(n, offset, __va(dt_ptr), 1);
		/* We should never reach here */
		panic("Relocation error");
	}
	kaslr_early_init(__va(dt_ptr), size);
}
#endif
  319. #endif