// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
  8. #include <linux/export.h>
  9. #include <linux/mm.h>
  10. #include <linux/pagemap.h>
  11. #include <asm/cacheflush.h>
  12. #include <asm/cache.h>
  13. #include <asm/tlbflush.h>
  14. void sync_icache_aliases(unsigned long start, unsigned long end)
  15. {
  16. if (icache_is_aliasing()) {
  17. dcache_clean_pou(start, end);
  18. icache_inval_all_pou();
  19. } else {
  20. /*
  21. * Don't issue kick_all_cpus_sync() after I-cache invalidation
  22. * for user mappings.
  23. */
  24. caches_clean_inval_pou(start, end);
  25. }
  26. }
  27. static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
  28. unsigned long end)
  29. {
  30. if (vma->vm_flags & VM_EXEC)
  31. sync_icache_aliases(start, end);
  32. }
  33. /*
  34. * Copy user data from/to a page which is mapped into a different processes
  35. * address space. Really, we want to allow our "user space" model to handle
  36. * this.
  37. */
  38. void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
  39. unsigned long uaddr, void *dst, const void *src,
  40. unsigned long len)
  41. {
  42. memcpy(dst, src, len);
  43. flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
  44. }
  45. void __sync_icache_dcache(pte_t pte)
  46. {
  47. struct page *page = pte_page(pte);
  48. /*
  49. * HugeTLB pages are always fully mapped, so only setting head page's
  50. * PG_dcache_clean flag is enough.
  51. */
  52. if (PageHuge(page))
  53. page = compound_head(page);
  54. if (!test_bit(PG_dcache_clean, &page->flags)) {
  55. sync_icache_aliases((unsigned long)page_address(page),
  56. (unsigned long)page_address(page) +
  57. page_size(page));
  58. set_bit(PG_dcache_clean, &page->flags);
  59. }
  60. }
  61. EXPORT_SYMBOL_GPL(__sync_icache_dcache);
  62. /*
  63. * This function is called when a page has been modified by the kernel. Mark
  64. * it as dirty for later flushing when mapped in user space (if executable,
  65. * see __sync_icache_dcache).
  66. */
  67. void flush_dcache_page(struct page *page)
  68. {
  69. /*
  70. * HugeTLB pages are always fully mapped and only head page will be
  71. * set PG_dcache_clean (see comments in __sync_icache_dcache()).
  72. */
  73. if (PageHuge(page))
  74. page = compound_head(page);
  75. if (test_bit(PG_dcache_clean, &page->flags))
  76. clear_bit(PG_dcache_clean, &page->flags);
  77. }
  78. EXPORT_SYMBOL(flush_dcache_page);
  79. /*
  80. * Additional functions defined in assembly.
  81. */
  82. EXPORT_SYMBOL(caches_clean_inval_pou);
  83. #ifdef CONFIG_ARCH_HAS_PMEM_API
  84. void arch_wb_cache_pmem(void *addr, size_t size)
  85. {
  86. /* Ensure order against any prior non-cacheable writes */
  87. dmb(osh);
  88. dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
  89. }
  90. EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
  91. void arch_invalidate_pmem(void *addr, size_t size)
  92. {
  93. dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
  94. }
  95. EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
  96. #endif