hibernate_32.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support specific for i386 - temporary page tables
 *
 * Copyright (c) 2006 Rafael J. Wysocki <[email protected]>
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/suspend.h>

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/* The following three functions are based on the analogous code in
 * arch/x86/mm/init_32.c
 */

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/* Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			if (boot_cpu_has(X86_FEATURE_PSE)) {
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	return 0;
}

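/*
 * With PAE, make every entry of the first-level page table present by
 * pointing it at the zero page; the entries that cover the kernel
 * mapping are replaced with real ones later.  Without PAE there is
 * nothing to do here.
 */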
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}

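/*
 * Map the page containing the image kernel's entry point: the virtual
 * address restore_jump_address is pointed at the physical page
 * jump_address_phys, using a large page when PSE is available, so that
 * the final jump to the image kernel works with the temporary page
 * tables.
 */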
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		set_pmd(pmd + pmd_index(restore_jump_address),
			__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;

		set_pte(pte + pte_index(restore_jump_address),
			__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}

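/*
 * Build the temporary page tables on resume-safe pages, relocate the
 * low-level restore code to a safe page and then switch over to the
 * image kernel.
 */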
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);

	error = set_up_temporary_text_mapping(resume_pg_dir);
	if (error)
		return error;

	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

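	/* Physical address of the temporary page tables, used by the
	 * low-level resume code when it switches to them.
	 */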
	temp_pgt = __pa(resume_pg_dir);

	error = relocate_restore_code();
	if (error)
		return error;

	/* We have got enough memory and from now on we cannot recover */
	restore_image();
	return 0;
}