init.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/mmzone.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right
 * colour when needed. Since the page is never written to after
 * initialization, we don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

void setup_zero_pages(void)
{
        unsigned int order, i;
        struct page *page;

        order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        for (i = 0; i < (1 << order); i++, page++)
                mark_page_reserved(page);

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

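/*
 * Copy a user page: map source and destination with kmap_local_page(),
 * copy the data, then publish the result to other CPUs with a write
 * barrier before the new page is used.
 */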
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vfrom = kmap_local_page(from);
        vto = kmap_local_page(to);
        copy_page(vto, vfrom);
        kunmap_local(vfrom);
        kunmap_local(vto);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}

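/* A PFN is RAM if memblock knows it as memory and it is not reserved. */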
int __ref page_is_ram(unsigned long pfn)
{
        unsigned long addr = PFN_PHYS(pfn);

        return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}

#ifndef CONFIG_NUMA
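/*
 * Non-NUMA zone setup: record the highest PFN of each zone (DMA/DMA32
 * when configured, plus ZONE_NORMAL) and hand them to free_area_init().
 */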
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

        free_area_init(max_zone_pfns);
}

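/*
 * Release all memblock-managed pages to the buddy allocator and set up
 * the zeroed page(s) used for read faults on anonymous mappings.
 */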
void __init mem_init(void)
{
        max_mapnr = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        memblock_free_all();
        setup_zero_pages();     /* Setup zeroed pages. */
}
#endif /* !CONFIG_NUMA */

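/* Free the memory occupied by __init code and data once boot is done. */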
void __ref free_initmem(void)
{
        free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_MEMORY_HOTPLUG
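/* Hot-add a memory range: create its memmap and sections via __add_pages(). */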
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        ret = __add_pages(nid, start_pfn, nr_pages, params);
        if (ret)
                pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
                                __func__, ret);

        return ret;
}

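/*
 * Hot-remove a memory range. With an altmap the struct pages backing the
 * range live inside the range itself, so skip that offset before teardown.
 */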
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct page *page = pfn_to_page(start_pfn);

        /* With altmap the first mapped page is offset from @start */
        if (altmap)
                page += vmem_altmap_offset(altmap);
        __remove_pages(start_pfn, nr_pages, altmap);
}

#ifdef CONFIG_NUMA
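/* Map a physical address of hot-added memory to its NUMA node id. */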
int memory_add_physaddr_to_nid(u64 start)
{
        return pa_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif

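/*
 * Walk the kernel page tables for a fixmap address, allocating any missing
 * intermediate levels from memblock, and return the PTE slot for it.
 */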
static pte_t *fixmap_pte(unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset_k(addr);
        p4d = p4d_offset(pgd, addr);

        if (pgd_none(*pgd)) {
                pud_t *new __maybe_unused;

                new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
                pgd_populate(&init_mm, pgd, new);
#ifndef __PAGETABLE_PUD_FOLDED
                pud_init((unsigned long)new, (unsigned long)invalid_pmd_table);
#endif
        }

        pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
                pmd_t *new __maybe_unused;

                new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
                pud_populate(&init_mm, pud, new);
#ifndef __PAGETABLE_PMD_FOLDED
                pmd_init((unsigned long)new, (unsigned long)invalid_pte_table);
#endif
        }

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
                pte_t *new __maybe_unused;

                new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
                pmd_populate_kernel(&init_mm, pmd, new);
        }

        return pte_offset_kernel(pmd, addr);
}

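/*
 * Install (or, if flags is 0, clear) the mapping for a fixmap slot,
 * flushing the TLB for that address when the mapping is torn down.
 */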
void __init __set_fixmap(enum fixed_addresses idx,
                         phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *ptep;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        ptep = fixmap_pte(addr);
        if (!pte_none(*ptep)) {
                pte_ERROR(*ptep);
                return;
        }

        if (pgprot_val(flags))
                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
        else {
                pte_clear(&init_mm, addr, ptep);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}

/*
 * Align swapper_pg_dir to 64K so that its address can be loaded with a
 * single LUI instruction in the TLB handlers. If we used __aligned(64K),
 * its size would get rounded up to the alignment size and waste space.
 * So we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

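/*
 * Placeholder page tables: empty entries at each level point at these
 * all-invalid tables so a page-table walk never follows a null pointer.
 */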
pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);