  1. /*
  2. * arch/xtensa/mm/init.c
  3. *
  4. * Derived from MIPS, PPC.
  5. *
  6. * This file is subject to the terms and conditions of the GNU General Public
  7. * License. See the file "COPYING" in the main directory of this archive
  8. * for more details.
  9. *
  10. * Copyright (C) 2001 - 2005 Tensilica Inc.
  11. * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
  12. *
  13. * Chris Zankel <[email protected]>
  14. * Joe Taylor <[email protected], [email protected]>
  15. * Marc Gauthier
  16. * Kevin Chea
  17. */
  18. #include <linux/kernel.h>
  19. #include <linux/errno.h>
  20. #include <linux/memblock.h>
  21. #include <linux/gfp.h>
  22. #include <linux/highmem.h>
  23. #include <linux/swap.h>
  24. #include <linux/mman.h>
  25. #include <linux/nodemask.h>
  26. #include <linux/mm.h>
  27. #include <linux/of_fdt.h>
  28. #include <linux/dma-map-ops.h>
  29. #include <asm/bootparam.h>
  30. #include <asm/page.h>
  31. #include <asm/sections.h>
  32. #include <asm/sysmem.h>
/*
 * Initialize the bootmem system and give it all low memory we have available.
 *
 * Called early in boot: carves out reserved regions, derives the global
 * pfn bounds (min_low_pfn/max_low_pfn/max_pfn) from the memblock-registered
 * DRAM range, and caps memblock allocations to lowmem.
 */
void __init bootmem_init(void)
{
	/* Reserve all memory below PHYS_OFFSET, as memory
	 * accounting doesn't work for pages below that address.
	 *
	 * If PHYS_OFFSET is zero reserve page at address 0:
	 * successful allocations should never return NULL.
	 */
	memblock_reserve(0, PHYS_OFFSET ? PHYS_OFFSET : 1);

	/* Honor /reserved-memory and /memreserve/ entries from the DT. */
	early_init_fdt_scan_reserved_mem();

	if (!memblock_phys_mem_size())
		panic("No memory found!\n");

	/*
	 * pfn bounds: lowmem starts at the first full page of DRAM (but
	 * never below PHYS_OFFSET, see the reservation above) and ends at
	 * the smaller of the end of DRAM and the architectural lowmem
	 * limit MAX_LOW_PFN; max_pfn covers highmem as well.
	 */
	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = min(max_pfn, MAX_LOW_PFN);

	/* Optional early memory test over lowmem only. */
	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/* Keep memblock (and CMA) allocations inside lowmem. */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	memblock_dump_all();
}
  58. void __init zones_init(void)
  59. {
  60. /* All pages are DMA-able, so we put them all in the DMA zone. */
  61. unsigned long max_zone_pfn[MAX_NR_ZONES] = {
  62. [ZONE_NORMAL] = max_low_pfn,
  63. #ifdef CONFIG_HIGHMEM
  64. [ZONE_HIGHMEM] = max_pfn,
  65. #endif
  66. };
  67. free_area_init(max_zone_pfn);
  68. }
  69. static void __init free_highpages(void)
  70. {
  71. #ifdef CONFIG_HIGHMEM
  72. unsigned long max_low = max_low_pfn;
  73. phys_addr_t range_start, range_end;
  74. u64 i;
  75. /* set highmem page free */
  76. for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
  77. &range_start, &range_end, NULL) {
  78. unsigned long start = PFN_UP(range_start);
  79. unsigned long end = PFN_DOWN(range_end);
  80. /* Ignore complete lowmem entries */
  81. if (end <= max_low)
  82. continue;
  83. /* Truncate partial highmem entries */
  84. if (start < max_low)
  85. start = max_low;
  86. for (; start < end; start++)
  87. free_highmem_page(pfn_to_page(start));
  88. }
  89. #endif
  90. }
  91. /*
  92. * Initialize memory pages.
  93. */
/*
 * Initialize memory pages.
 *
 * Final arch-side mm bring-up: free highmem pages, record the pfn/
 * virtual-address bounds the core mm expects, release all remaining
 * memblock memory to the buddy allocator, and log the kernel's
 * virtual memory layout.
 *
 * NOTE: the pr_info format string and its argument list below are
 * interleaved across matching #ifdef blocks — a specifier added or
 * removed on one side must be mirrored on the other.
 */
void __init mem_init(void)
{
	free_highpages();

	/* Highest page frame managed, relative to ARCH_PFN_OFFSET. */
	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
	/* First byte past the directly-mapped (lowmem) region. */
	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);

	memblock_free_all();

	pr_info("virtual kernel memory layout:\n"
#ifdef CONFIG_KASAN
		"    kasan   : 0x%08lx - 0x%08lx  (%5lu MB)\n"
#endif
#ifdef CONFIG_MMU
		"    vmalloc : 0x%08lx - 0x%08lx  (%5lu MB)\n"
#endif
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
		"    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
#endif
		"    lowmem  : 0x%08lx - 0x%08lx  (%5lu MB)\n"
		"    .text   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
		"    .rodata : 0x%08lx - 0x%08lx  (%5lu kB)\n"
		"    .data   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
		"    .init   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
		"    .bss    : 0x%08lx - 0x%08lx  (%5lu kB)\n",
#ifdef CONFIG_KASAN
		KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
		KASAN_SHADOW_SIZE >> 20,
#endif
#ifdef CONFIG_MMU
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
		FIXADDR_START, FIXADDR_END,
		(FIXADDR_END - FIXADDR_START) >> 10,
#endif
		/* With an MMU, lowmem lives in the PAGE_OFFSET direct map. */
		PAGE_OFFSET, PAGE_OFFSET +
		(max_low_pfn - min_low_pfn) * PAGE_SIZE,
#else
		/* Without an MMU, physical addresses are used directly. */
		min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
#endif
		((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
		(unsigned long)_text, (unsigned long)_etext,
		(unsigned long)(_etext - _text) >> 10,
		(unsigned long)__start_rodata, (unsigned long)__end_rodata,
		(unsigned long)(__end_rodata - __start_rodata) >> 10,
		(unsigned long)_sdata, (unsigned long)_edata,
		(unsigned long)(_edata - _sdata) >> 10,
		(unsigned long)__init_begin, (unsigned long)__init_end,
		(unsigned long)(__init_end - __init_begin) >> 10,
		(unsigned long)__bss_start, (unsigned long)__bss_stop,
		(unsigned long)(__bss_stop - __bss_start) >> 10);
}
  147. static void __init parse_memmap_one(char *p)
  148. {
  149. char *oldp;
  150. unsigned long start_at, mem_size;
  151. if (!p)
  152. return;
  153. oldp = p;
  154. mem_size = memparse(p, &p);
  155. if (p == oldp)
  156. return;
  157. switch (*p) {
  158. case '@':
  159. start_at = memparse(p + 1, &p);
  160. memblock_add(start_at, mem_size);
  161. break;
  162. case '$':
  163. start_at = memparse(p + 1, &p);
  164. memblock_reserve(start_at, mem_size);
  165. break;
  166. case 0:
  167. memblock_reserve(mem_size, -mem_size);
  168. break;
  169. default:
  170. pr_warn("Unrecognized memmap syntax: %s\n", p);
  171. break;
  172. }
  173. }
  174. static int __init parse_memmap_opt(char *str)
  175. {
  176. while (str) {
  177. char *k = strchr(str, ',');
  178. if (k)
  179. *k++ = 0;
  180. parse_memmap_one(str);
  181. str = k;
  182. }
  183. return 0;
  184. }
  185. early_param("memmap", parse_memmap_opt);
#ifdef CONFIG_MMU
/*
 * Map each VM_READ|VM_WRITE|VM_EXEC|VM_SHARED combination to its page
 * protection. Private writable mappings get PAGE_COPY* (presumably
 * write-protected for copy-on-write — matches the shared/private split
 * below); only shared writable mappings get PAGE_SHARED*. Consumed by
 * the vm_get_page_prot() helper generated just after the table.
 */
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
};
DECLARE_VM_GET_PAGE_PROT
#endif