hibernate_64.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support for x86-64
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/pgtable.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
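
/*
 * Install a kernel-text mapping into the temporary page tables rooted at
 * @pgd: a single large-page PMD entry covering the image kernel's entry
 * point, mapped at the virtual address the image kernel expects.  The
 * rationale is spelled out in the long comment inside the function.
 */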
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));

		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}
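
/*
 * Page-table allocation callback for kernel_ident_mapping_init(): hand out
 * "safe" pages, i.e. pages that the hibernation core guarantees do not
 * overlap with data of the image being restored.
 */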
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}
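
/*
 * Build the throwaway page tables used while the image is being restored:
 * the temporary kernel text mapping for the final jump plus an identity
 * mapping of every directly mapped physical range.  Their root is published
 * via temp_pgt, which the low-level assembly restore code loads into CR3.
 */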
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page = alloc_pgt_page,
		.page_flag = __PAGE_KERNEL_LARGE_EXEC,
		.offset = __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend = pfn_mapped[i].end << PAGE_SHIFT;
		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_pgt = __pa(pgd);
	return 0;
}
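
/*
 * Arch entry point for resume: set up the temporary mappings, copy the
 * restore trampoline to a safe page, then jump into it via restore_image().
 * On success restore_image() never returns here; execution continues in
 * the restored image kernel.
 */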
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();
	return 0;
}