suspend.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);
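
/*
 * Note: __cpu_suspend is the low-level entry point implemented in
 * assembly (sleep.S); it saves the CPU context via __cpu_suspend_save()
 * below, then calls the finisher fn(arg), which is expected to power
 * the CPU down and not return. It returns non-zero if the finisher
 * comes back (suspend aborted) and zero when the CPU comes back up
 * through the cpu_resume path.
 */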

#ifdef CONFIG_MMU
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	u32 __mpidr = cpu_logical_map(smp_processor_id());
	int ret;

	if (!idmap_pgd)
		return -EINVAL;

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers),
	 * hence disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * Provide a temporary page table with an identity mapping for
	 * the MMU-enable code, required for resuming. On successful
	 * resume (indicated by a zero return code), we need to switch
	 * back to the correct page tables.
	 */
	ret = __cpu_suspend(arg, fn, __mpidr);

	unpause_graph_tracing();

	if (ret == 0) {
		cpu_switch_mm(mm->pgd, mm);
		local_flush_bp_all();
		local_flush_tlb_all();
		check_other_bugs();
	}

	return ret;
}
#else
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	u32 __mpidr = cpu_logical_map(smp_processor_id());
	int ret;

	pause_graph_tracing();
	ret = __cpu_suspend(arg, fn, __mpidr);
	unpause_graph_tracing();

	return ret;
}
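
/*
 * Without an MMU there is no identity map; defining idmap_pgd as NULL
 * lets the code shared with the MMU case (__cpu_suspend_save() below)
 * still compile, simply recording a NULL page-table pointer in the
 * saved context.
 */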
#define idmap_pgd	NULL
#endif
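
/*
 * Usage sketch: a platform's PM code supplies a "finisher" that does
 * the actual power-down and, on success, never returns. The names
 * below (soc_enter_lowpower, soc_pm_enter) are hypothetical and for
 * illustration only:
 *
 *	static int soc_finisher(unsigned long arg)
 *	{
 *		soc_enter_lowpower(arg);	// hypothetical SoC hook
 *		return 1;			// reached only if entry aborts
 *	}
 *
 *	static int soc_pm_enter(void)
 *	{
 *		int ret = cpu_suspend(0, soc_finisher);
 *		// ret == 0: we came back through cpu_resume
 *		// ret != 0: the finisher returned, suspend aborted
 *		return ret;
 *	}
 */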

/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
	u32 *ctx = ptr;

	*save_ptr = virt_to_phys(ptr);

	/* This must correspond to the LDM in cpu_resume() assembly */
	*ptr++ = virt_to_phys(idmap_pgd);
	*ptr++ = sp;
	*ptr++ = virt_to_phys(cpu_do_resume);

	cpu_do_suspend(ptr);

	flush_cache_louis();

	/*
	 * flush_cache_louis does not guarantee that
	 * save_ptr and ptr are cleaned to main memory,
	 * just up to the Level of Unification Inner Shareable.
	 * Since the context pointer and context itself
	 * are to be retrieved with the MMU off, that
	 * data must be cleaned from all cache levels
	 * to main memory using "area" cache primitives.
	 */
	__cpuc_flush_dcache_area(ctx, ptrsz);
	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));

	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
}
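
/*
 * Saved-context layout produced above (the LDM in the cpu_resume
 * assembly is the authoritative reference):
 *
 *	ctx[0]	 physical address of idmap_pgd
 *	ctx[1]	 virtual stack pointer to restore
 *	ctx[2]	 physical address of cpu_do_resume
 *	ctx[3..] CPU state written by cpu_do_suspend()
 */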

extern struct sleep_save_sp sleep_save_sp;
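
/*
 * sleep_save_sp lives in the resume assembly's data section; it gives
 * the early resume code the physical address of the stash of per-CPU
 * context pointers (indexed by MPIDR hash) before the MMU is enabled,
 * which is why it must be written back to RAM with sync_cache_w().
 */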

static int cpu_suspend_alloc_sp(void)
{
	void *ctx_ptr;

	/* ctx_ptr is an array of physical addresses */
	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL);
	if (WARN_ON(!ctx_ptr))
		return -ENOMEM;

	sleep_save_sp.save_ptr_stash = ctx_ptr;
	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
	sync_cache_w(&sleep_save_sp);

	return 0;
}
early_initcall(cpu_suspend_alloc_sp);