init_32.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/cpu_entry_area.h>
#include <asm/init.h>
#include <asm/pgtable_areas.h>
#include <asm/numa.h>

#include "mm_internal.h"
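
/* Page-frame bounds of the highmem range (set up in initmem_init()). */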
unsigned long highstart_pfn, highend_pfn;

bool __read_mostly __vmalloc_start_set = false;
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                p4d = p4d_offset(pgd, 0);
                pud = pud_offset(p4d, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        p4d = p4d_offset(pgd, 0);
        pud = pud_offset(p4d, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
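
/*
 * Return a pointer to the pmd entry covering @vaddr in swapper_pg_dir,
 * allocating the middle page table first if necessary.
 */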
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        int pgd_idx = pgd_index(vaddr);
        int pmd_idx = pmd_index(vaddr);

        return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}
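
/*
 * Same as populate_extra_pmd(), one level down: return a pointer to the
 * pte entry covering @vaddr, allocating the page table if necessary.
 */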
pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        int pte_idx = pte_index(vaddr);
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return one_page_table_init(pmd) + pte_idx;
}
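
/*
 * Count how many pte pages in [start, end) fall inside the kmap fixmap
 * range and may need to be re-placed to keep the pte-page allocation
 * linear; page_table_range_init() preallocates that many pages for
 * page_table_kmap_check() to consume.
 */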
static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
        unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
                return 0;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd_idx++) {
                        if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
                            (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
                                count++;
                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
#endif
        return count;
}
static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte,
                                           void **adr)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
                pte_t *newpte;
                int i;

                BUG_ON(after_bootmem);
                newpte = *adr;
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);
                *adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The page tables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;
        unsigned long count = page_table_range_init_count(start, end);
        void *adr = NULL;

        if (count)
                adr = alloc_low_pages(count);

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte, &adr);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
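
/*
 * Return 1 if @addr lies within the kernel image (from _text through
 * __init_end); such addresses are mapped executable below.
 */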
static inline int is_x86_32_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask,
                             pgprot_t prot)
{
        int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
        unsigned long last_map_addr = end;
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        /*
         * The first iteration will set up the identity mapping using
         * large/small pages based on use_pse, with the other attributes
         * the same as set by the early code in head_32.S.
         *
         * The second iteration will set up the appropriate attributes
         * (NX, GLOBAL..) as desired for the kernel identity mapping.
         *
         * This two-pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a
         *      way that would change, for any linear address, both the page
         *      size and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!boot_cpu_has(X86_FEATURE_PSE))
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                pfn &= PMD_MASK >> PAGE_SHIFT;
                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_x86_32_kernel_text(addr) ||
                                    is_x86_32_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_x86_32_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1) {
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                        last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
                                } else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * update direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * local global flush tlb, which will flush the previous
                 * mappings present in both small and large page TLB's.
                 */
                __flush_tlb_all();

                /*
                 * Second iteration will set the actual desired PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
        return last_map_addr;
}
#ifdef CONFIG_HIGHMEM
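/*
 * Create the page tables backing the persistent kmap (PKMAP) window and
 * record its first pte in pkmap_page_table for the highmem kmap code.
 */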
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr = PKMAP_BASE;

        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pkmap_page_table = virt_to_kpte(vaddr);
}
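
/*
 * Release every usable highmem page in [start_pfn, end_pfn) to the buddy
 * allocator; the iterator only walks free memblock ranges, so reserved
 * pages are skipped.
 */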
void __init add_highpages_with_active_regions(int nid,
                unsigned long start_pfn, unsigned long end_pfn)
{
        phys_addr_t start, end;
        u64 i;

        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
                unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
                                            start_pfn, end_pfn);
                unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
                                              start_pfn, end_pfn);
                for ( ; pfn < e_pfn; pfn++)
                        if (pfn_valid(pfn))
                                free_highmem_page(pfn_to_page(pfn));
        }
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */
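
/*
 * Copy the kernel-space mappings from swapper_pg_dir back into
 * initial_page_table, keeping the boot-time page table usable.
 */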
void __init sync_initial_page_table(void)
{
        clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS);

        /*
         * Sync back the low identity map too. It is used, for example,
         * by the 32-bit EFI stub.
         */
        clone_pgd_range(initial_page_table,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                        min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
}
void __init native_pagetable_init(void)
{
        unsigned long pfn, va;
        pgd_t *pgd, *base = swapper_pg_dir;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table.
         * In the virtual address space, there should be at least two
         * pages between VMALLOC_END and the pkmap or fixmap areas, per
         * the definition of VMALLOC_END, and max_low_pfn corresponds to
         * VMALLOC_END's physical address. So if the initial memory
         * mapping did its job, either a pte is in use near max_low_pfn
         * or the pmd there is not present.
         */
        for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                p4d = p4d_offset(pgd, va);
                pud = pud_offset(p4d, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                /* should not be large page here */
                if (pmd_large(*pmd)) {
                        pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
                                pfn, pmd, __pa(pmd));
                        BUG_ON(1);
                }

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
                       pfn, pmd, __pa(pmd), pte, __pa(pte));
                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
        paging_init();
}
/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}
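
/*
 * The direct mapping was already built by kernel_physical_mapping_init();
 * the only page table work left here is the permanent kmap area.
 */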
static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}
#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
        "highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
        "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;

        if (highmem_pages == -1)
                highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
        if (highmem_pages >= max_pfn) {
                printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
                        pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                highmem_pages = 0;
        }
        if (highmem_pages) {
                if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
                        printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn -= highmem_pages;
        }
#else
        if (highmem_pages)
                printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
        "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
        "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
        max_low_pfn = MAXMEM_PFN;

        if (highmem_pages == -1)
                highmem_pages = max_pfn - MAXMEM_PFN;

        if (highmem_pages + MAXMEM_PFN < max_pfn)
                max_pfn = MAXMEM_PFN + highmem_pages;

        if (highmem_pages + MAXMEM_PFN > max_pfn) {
                printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
                        pages_to_mb(max_pfn - MAXMEM_PFN),
                        pages_to_mb(highmem_pages));
                highmem_pages = 0;
        }
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
                printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
        else
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
        max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
        if (max_pfn > MAX_NONPAE_PFN) {
                max_pfn = MAX_NONPAE_PFN;
                printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
        }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */
        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

        memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);

#ifdef CONFIG_FLATMEM
        max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
        __vmalloc_start_set = true;

        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NUMA */
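
/*
 * Nothing is allocated here any more (memblock has replaced the old
 * bootmem allocator); just report the mapped low-RAM ranges.
 */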
void __init setup_bootmem_allocator(void)
{
        printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
                max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        olpc_dt_build_devicetree();
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        char z = 0;

        printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");

        __set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);

        if (copy_to_kernel_nofault((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
                clear_fixmap(FIX_WP_TEST);
                printk(KERN_CONT "Ok.\n");
                return;
        }

        printk(KERN_CONT "No.\n");
        panic("Linux doesn't support CPUs with broken WP.");
}
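
/*
 * Late memory setup: hand all remaining free pages, low and high memory,
 * to the buddy allocator and sanity-check the 32-bit virtual memory layout.
 */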
void __init mem_init(void)
{
        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /*
         * With CONFIG_DEBUG_PAGEALLOC, initialization of highmem pages
         * has to be done before memblock_free_all(). Memblock uses free
         * low memory for temporary data (see find_range_array()) and for
         * this purpose can use pages that were already passed to the
         * buddy allocator and hence are marked as not accessible in the
         * page tables when compiled with CONFIG_DEBUG_PAGEALLOC.
         * Otherwise the order of initialization is not important here.
         */
        set_highmem_pages_init();

        /* this will put all low memory onto the freelists */
        memblock_free_all();

        after_bootmem = 1;
        x86_init.hyper.init_after_bootmem();

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        test_wp_bit();
}
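
/* Non-zero once mark_rodata_ro() has write-protected the kernel image. */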
int kernel_set_to_readonly __read_mostly;

static void mark_nxdata_nx(void)
{
        /*
         * When this is called, init has already been executed and released,
         * so everything past _etext should be NX.
         */
        unsigned long start = PFN_ALIGN(_etext);
        /*
         * This comes from the is_x86_32_kernel_text() upper limit,
         * rounded up to HPAGE_SIZE where big pages are used:
         */
        unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

        if (__supported_pte_mask & _PAGE_NX)
                printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
        set_memory_nx(start, size >> PAGE_SHIFT);
}
void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = (unsigned long)__end_rodata - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        pr_info("Write protecting kernel text and read-only data: %luk\n",
                size >> 10);

        kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
        pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        pr_info("Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
        mark_nxdata_nx();
        if (__supported_pte_mask & _PAGE_NX)
                debug_checkwx();
}