/* arch/x86/mm/pgtable_32.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/sched.h>
  3. #include <linux/kernel.h>
  4. #include <linux/errno.h>
  5. #include <linux/mm.h>
  6. #include <linux/nmi.h>
  7. #include <linux/swap.h>
  8. #include <linux/smp.h>
  9. #include <linux/highmem.h>
  10. #include <linux/pagemap.h>
  11. #include <linux/spinlock.h>
  12. #include <asm/cpu_entry_area.h>
  13. #include <asm/fixmap.h>
  14. #include <asm/e820/api.h>
  15. #include <asm/tlb.h>
  16. #include <asm/tlbflush.h>
  17. #include <asm/io.h>
  18. #include <linux/vmalloc.h>
  19. unsigned int __VMALLOC_RESERVE = 128 << 20;
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 *
 * Walks the kernel page tables (swapper_pg_dir) down to the PTE slot
 * for @vaddr and installs @pteval there, or clears the slot when
 * @pteval is "none".  All intermediate levels (pgd/p4d/pud/pmd) must
 * already be populated — a missing level is treated as a bug rather
 * than allocated here.
 */
void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none(*p4d)) {
		BUG();
		return;
	}
	pud = pud_offset(p4d, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* A "none" pteval means "unmap": clear the slot instead of setting it. */
	if (!pte_none(pteval))
		set_pte_at(&init_mm, vaddr, pte, pteval);
	else
		pte_clear(&init_mm, vaddr, pte);
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	flush_tlb_one_kernel(vaddr);
}
/*
 * Top of the fixmap area.  The default of 0xfffff000 leaves the very
 * last 4 KiB page of the address space unused.  NOTE(review): presumably
 * lowered by reserve_top_address() for the "reservetop=" case below —
 * confirm against reserve_top_address()'s definition.
 */
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);
  64. /*
  65. * vmalloc=size forces the vmalloc area to be exactly 'size'
  66. * bytes. This can be used to increase (or decrease) the
  67. * vmalloc area - the default is 128m.
  68. */
  69. static int __init parse_vmalloc(char *arg)
  70. {
  71. if (!arg)
  72. return -EINVAL;
  73. /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/
  74. __VMALLOC_RESERVE = memparse(arg, &arg) + VMALLOC_OFFSET;
  75. return 0;
  76. }
  77. early_param("vmalloc", parse_vmalloc);
  78. /*
  79. * reservetop=size reserves a hole at the top of the kernel address space which
  80. * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
  81. * so relocating the fixmap can be done before paging initialization.
  82. */
  83. static int __init parse_reservetop(char *arg)
  84. {
  85. unsigned long address;
  86. if (!arg)
  87. return -EINVAL;
  88. address = memparse(arg, &arg);
  89. reserve_top_address(address);
  90. early_ioremap_init();
  91. return 0;
  92. }
  93. early_param("reservetop", parse_reservetop);